entry_point (string, 1–65 chars) | original_triton_code (string, 4.5k–619k chars) | python_code (string, 208–60.9k chars) | triton_code (string, 1.15k–275k chars) | repo_name (string, 7–115 chars) | module_name (string, 1–65 chars) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (list, 1–6 items) | stars (int64, 0–19.8k) | sha (string, 40 chars) | repo_link (string, 72–180 chars)
---|---|---|---|---|---|---|---|---|---|---|---|
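Each row pairs an eager PyTorch module (`python_code`, with `get_inputs`/`get_init_inputs` helpers) with the Triton code that torch.compile's Inductor backend emitted for it (`original_triton_code`) plus a trimmed standalone variant (`triton_code`), alongside provenance metadata (`repo_name`, `module_name`, `sha`, `licenses`, `stars`, `repo_link`). A minimal loading sketch, assuming the rows are published as a Hugging Face dataset; the dataset id below is a hypothetical placeholder, not a real published name:

```python
# Hypothetical sketch: "org/inductor-triton-pairs" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("org/inductor-triton-pairs", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["licenses"])
# Paired sources: eager nn.Module definition vs. Inductor-generated Triton.
print(len(row["python_code"]), len(row["triton_code"]))
```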
LastBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ey/cey4yc74rqvkmtcxtse2vt3dw6pfdi3zwtwezx7cdzkykhz4kzp7.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# x_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf1, 16, grid=grid(16), stream=stream0)
return (buf1, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class LastBlock(nn.Module):
"""Implements the last block, which is a dense block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=1.0, use_bn=False):
super().__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=
out_channels, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.fc(x) * self.scale
x = x.view(x.shape[0], x.shape[1], 1, 1)
return self.bn(x).view(x.shape[0], x.shape[1])
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf1, primals_1
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class LastBlockNew(nn.Module):
"""Implements the last block, which is a dense block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=1.0, use_bn=False):
super().__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=
out_channels, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| thunguyenphuoc/idinvert_pytorch | LastBlock | false | 13,133 | [
"MIT"
]
| 0 | bf8a81e75d193c22a05d9c4457907dc468389766 | https://github.com/thunguyenphuoc/idinvert_pytorch/tree/bf8a81e75d193c22a05d9c4457907dc468389766 |
MinMaxNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/32/c32i2334e2cubbxcfo3qfe3hcxrbbjwe3jyvzl2kvxsr6gdadsdu.py
# Topologically Sorted Source Nodes: [sub, mul, truediv, add], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# sub => sub
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 4), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 0), kwargs = {})
triton_poi_fused_add_div_mul_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 4.0
tmp2 = tmp0 - tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = float("inf")
tmp6 = tmp4 * tmp5
tmp7 = 0.0
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, mul, truediv, add], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MinMaxNorm(nn.Module):
def __init__(self, min, max, a=0, b=1):
super(MinMaxNorm, self).__init__()
self.min, self.max = min, max
self.a, self.b = a, b
def forward(self, x):
return self.a + (x - self.min) * (self.b - self.a) / (self.max -
self.min)
def inverse(self, x):
return self.min + (x - self.a) * (self.max - self.min) / (self.b -
self.a)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'min': 4, 'max': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 - tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = float('inf')
tmp6 = tmp4 * tmp5
tmp7 = 0.0
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MinMaxNormNew(nn.Module):
def __init__(self, min, max, a=0, b=1):
super(MinMaxNormNew, self).__init__()
self.min, self.max = min, max
self.a, self.b = a, b
def inverse(self, x):
return self.min + (x - self.a) * (self.max - self.min) / (self.b -
self.a)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| yhgon/speedyspeech | MinMaxNorm | false | 13,134 | [
"BSD-3-Clause"
]
| 0 | 574c6a94091431f313e2aae8e154b8c80e6908ce | https://github.com/yhgon/speedyspeech/tree/574c6a94091431f313e2aae8e154b8c80e6908ce |
DisConvModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/pl/cpl2phcxgwq27h7hrkttckk2rwsgsjtseizody7iq5gpui6mr7xd.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/o7/co7q4vm5trvbqdcumxlbkclncooaexuslaaazlkr55noaxme4mfi.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/xs/cxsybwlktadgfr5i5ocek54wnwiwvwjfehkwokwt2npfoitrjcek.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_4 => convolution_2
# x_5 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/lr/clrgts2iohcmp3svgdcrrk4uszvars7l4nnlqq72v6xk4x7pzlju.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# x_6 => convolution_3
# x_7 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
# %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_3, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (8, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (16, 8, 5, 5), (200, 25, 5, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (16, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_9, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, 32, grid=grid(32), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 1, 1), (16, 1, 1, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf5, primals_7, 64, grid=grid(64), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 1, 1), (16, 1, 1, 1))
buf7 = buf6; del buf6 # reuse
buf8 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3.run(buf7, primals_9, buf8, 64, grid=grid(64), stream=stream0)
del primals_9
return (buf7, primals_1, primals_2, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 8, 5, 5), (200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 16, 5, 5), (400, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.utils import spectral_norm as spectral_norm_fn
from torch.nn.utils import weight_norm as weight_norm_fn
def dis_conv(input_dim, output_dim, kernel_size=5, stride=2, padding=0,
rate=1, activation='lrelu'):
return Conv2dBlock(input_dim, output_dim, kernel_size, stride,
conv_padding=padding, dilation=rate, activation=activation)
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride, padding=
0, conv_padding=0, dilation=1, weight_norm='none', norm='none',
activation='relu', pad_type='zero', transpose=False):
super(Conv2dBlock, self).__init__()
self.use_bias = True
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
elif pad_type == 'none':
self.pad = None
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
norm_dim = output_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if weight_norm == 'sn':
self.weight_norm = spectral_norm_fn
elif weight_norm == 'wn':
self.weight_norm = weight_norm_fn
elif weight_norm == 'none':
self.weight_norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(weight_norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if transpose:
self.conv = nn.ConvTranspose2d(input_dim, output_dim,
kernel_size, stride, padding=conv_padding, output_padding=
conv_padding, dilation=dilation, bias=self.use_bias)
else:
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size,
stride, padding=conv_padding, dilation=dilation, bias=self.
use_bias)
if self.weight_norm:
self.conv = self.weight_norm(self.conv)
def forward(self, x):
if self.pad:
x = self.conv(self.pad(x))
else:
x = self.conv(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class DisConvModule(nn.Module):
def __init__(self, input_dim, cnum, use_cuda=True, device_ids=None):
super(DisConvModule, self).__init__()
self.use_cuda = use_cuda
self.device_ids = device_ids
self.conv1 = dis_conv(input_dim, cnum, 5, 2, 2)
self.conv2 = dis_conv(cnum, cnum * 2, 5, 2, 2)
self.conv3 = dis_conv(cnum * 2, cnum * 4, 5, 2, 2)
self.conv4 = dis_conv(cnum * 4, cnum * 4, 5, 2, 2)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'cnum': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.utils import spectral_norm as spectral_norm_fn
from torch.nn.utils import weight_norm as weight_norm_fn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (8, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (16, 8, 5, 5), (200, 25, 5, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_9, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
2), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(64)](buf1, primals_3,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_leaky_relu_1[grid(32)](buf3, primals_5,
32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 1, 1), (16, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_leaky_relu_2[grid(64)](buf5, primals_7,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 1, 1), (16, 1, 1, 1))
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3[grid(64)
](buf7, primals_9, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
return (buf7, primals_1, primals_2, primals_4, primals_6, primals_8,
buf1, buf3, buf5, buf8)
def dis_conv(input_dim, output_dim, kernel_size=5, stride=2, padding=0,
rate=1, activation='lrelu'):
return Conv2dBlock(input_dim, output_dim, kernel_size, stride,
conv_padding=padding, dilation=rate, activation=activation)
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride, padding=
0, conv_padding=0, dilation=1, weight_norm='none', norm='none',
activation='relu', pad_type='zero', transpose=False):
super(Conv2dBlock, self).__init__()
self.use_bias = True
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
elif pad_type == 'none':
self.pad = None
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
norm_dim = output_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if weight_norm == 'sn':
self.weight_norm = spectral_norm_fn
elif weight_norm == 'wn':
self.weight_norm = weight_norm_fn
elif weight_norm == 'none':
self.weight_norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(weight_norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if transpose:
self.conv = nn.ConvTranspose2d(input_dim, output_dim,
kernel_size, stride, padding=conv_padding, output_padding=
conv_padding, dilation=dilation, bias=self.use_bias)
else:
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size,
stride, padding=conv_padding, dilation=dilation, bias=self.
use_bias)
if self.weight_norm:
self.conv = self.weight_norm(self.conv)
def forward(self, x):
if self.pad:
x = self.conv(self.pad(x))
else:
x = self.conv(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class DisConvModuleNew(nn.Module):
def __init__(self, input_dim, cnum, use_cuda=True, device_ids=None):
super(DisConvModuleNew, self).__init__()
self.use_cuda = use_cuda
self.device_ids = device_ids
self.conv1 = dis_conv(input_dim, cnum, 5, 2, 2)
self.conv2 = dis_conv(cnum, cnum * 2, 5, 2, 2)
self.conv3 = dis_conv(cnum * 2, cnum * 4, 5, 2, 2)
self.conv4 = dis_conv(cnum * 4, cnum * 4, 5, 2, 2)
def forward(self, input_0):
primals_2 = self.conv1.conv.weight
primals_3 = self.conv1.conv.bias
primals_4 = self.conv2.conv.weight
primals_5 = self.conv2.conv.bias
primals_6 = self.conv3.conv.weight
primals_7 = self.conv3.conv.bias
primals_8 = self.conv4.conv.weight
primals_9 = self.conv4.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| xy-gao/generative-inpainting-pytorch | DisConvModule | false | 13,135 | [
"MIT"
]
| 0 | 24f2183a11fd48a0383c9862e3d1a6354fbb6cda | https://github.com/xy-gao/generative-inpainting-pytorch/tree/24f2183a11fd48a0383c9862e3d1a6354fbb6cda |
CriticVanilla | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# xu => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/m6/cm6ozsdmt5vl54fxwk7cgktzswysgn2c37vsaybpucplzehkrnnz.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/64/c64g5uxk2a5hbzuhd6oikla2gb5eyfjjb6kbh7btzswha52gl5ex.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = (xindex // 1200)
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4h/c4h6r6vefoeuinm5eqv2d6wqmfj2mnjacalp633y3m6bnseb2bnk.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.view]
# Source node to ATen node mapping:
# x_1 => relu_1
# x_2 => view_4
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {})
triton_poi_fused_relu_view_3 = async_compile.triton('triton_poi_fused_relu_view_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = (xindex // 300)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
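# Editor's note (a sketch): the kernel above is a pure layout change. It reads
# the ReLU output back out of the padded layout (x1 // 4 selects the group with
# stride 1216, x1 % 4 the row inside it) and writes a dense, contiguous
# (64, 300) matrix so the following addmm can consume a plain row-major operand.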
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (400, 8), (8, 1))
assert_size_stride(primals_4, (400, ), (1, ))
assert_size_stride(primals_5, (300, 400), (400, 1))
assert_size_stride(primals_6, (300, ), (1, ))
assert_size_stride(primals_7, (1, 300), (300, 1))
assert_size_stride(primals_8, (1, ), (1, ))
assert_size_stride(primals_9, (400, 8), (8, 1))
assert_size_stride(primals_10, (400, ), (1, ))
assert_size_stride(primals_11, (300, 400), (400, 1))
assert_size_stride(primals_12, (300, ), (1, ))
assert_size_stride(primals_13, (1, 300), (300, 1))
assert_size_stride(primals_14, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf1 # reuse
buf18 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_4, buf18, 25600, grid=grid(25600), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0), reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf3, primals_6, buf4, buf17, 19200, grid=grid(19200), stream=stream0)
del primals_6
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_3.run(buf4, buf5, 19200, grid=grid(19200), stream=stream0)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf5, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf7)
del primals_8
buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf8)
del primals_9
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf8 # reuse
buf16 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf9, primals_10, buf16, 25600, grid=grid(25600), stream=stream0)
del primals_10
buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0), reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf10)
buf11 = buf4; del buf4 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf10, primals_12, buf11, buf15, 19200, grid=grid(19200), stream=stream0)
del primals_12
buf12 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_3.run(buf11, buf12, 19200, grid=grid(19200), stream=stream0)
del buf11
buf14 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, buf12, reinterpret_tensor(primals_13, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf14)
del primals_14
return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(buf2, (64, 400), (400, 1), 0), buf5, reinterpret_tensor(buf9, (64, 400), (400, 1), 0), buf12, primals_13, buf15, primals_11, buf16, primals_7, buf17, primals_5, buf18, )
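# Editor's note: the first two entries of the tuple returned by call() are the
# Q1/Q2 outputs reshaped back to (4, 4, 4, 1); the remaining entries (the
# concatenated input, intermediate activations, weights, and boolean ReLU
# masks) are returned so autograd can reuse them in the backward pass.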
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class CriticVanilla(nn.Module):
"""a vanilla critic module that outputs a node's q-values given only its observation and action(no message between nodes)"""
def __init__(self, state_dim, action_dim):
super(CriticVanilla, self).__init__()
self.baseQ1 = MLPBase(state_dim + action_dim, 1)
self.baseQ2 = MLPBase(state_dim + action_dim, 1)
def forward(self, x, u):
xu = torch.cat([x, u], -1)
x1 = self.baseQ1(xu)
x2 = self.baseQ2(xu)
return x1, x2
def Q1(self, x, u):
xu = torch.cat([x, u], -1)
x1 = self.baseQ1(xu)
return x1
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
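# Editor's note: a minimal usage sketch, not part of the original file. The two
# heads follow the twin-critic pattern familiar from TD3-style algorithms, with
# both critics scoring the same (state, action) pair:
#     critic = CriticVanilla(state_dim=4, action_dim=4)
#     q1, q2 = critic(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
#     q1_only = critic.Q1(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))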
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
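# Editor's note (a sketch): the kernel above implements torch.cat([x, u], -1)
# for two (4, 4, 4, 4) inputs. For each output lane x0 in [0, 8), lanes below 4
# come from in_ptr0 and lanes 4..7 from in_ptr1 (offset by -4). The bare
# tl.full(...) expressions are dead stores the code generator left behind after
# the corresponding bounds checks were constant-folded away.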
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
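# Editor's note (a sketch): this kernel adds the layer-1 bias and applies ReLU
# in place on the matmul output, and additionally writes the (activation <= 0)
# mask at padded stride 1664 for threshold_backward in the backward pass.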
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
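# Editor's note (a sketch): a pure repacking kernel. It gathers the layer-2
# ReLU output out of the padded layout (group stride 1216) into a contiguous
# (64, 300) buffer so the extern addmm can run on a dense operand.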
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (400, 8), (8, 1))
assert_size_stride(primals_4, (400,), (1,))
assert_size_stride(primals_5, (300, 400), (400, 1))
assert_size_stride(primals_6, (300,), (1,))
assert_size_stride(primals_7, (1, 300), (300, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (400, 8), (8, 1))
assert_size_stride(primals_10, (400,), (1,))
assert_size_stride(primals_11, (300, 400), (400, 1))
assert_size_stride(primals_12, (300,), (1,))
assert_size_stride(primals_13, (1, 300), (300, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1)
del primals_3
        buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0)
del buf1
buf18 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf2,
primals_4, buf18, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf3,
primals_6, buf4, buf17, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_6
buf5 = buf3
del buf3
        triton_poi_fused_relu_view_3[grid(19200)](buf4, buf5, 19200,
            XBLOCK=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf5, reinterpret_tensor(primals_7,
(300, 1), (1, 300), 0), alpha=1, beta=1, out=buf7)
del primals_8
buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf8)
del primals_9
        buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0)
del buf8
buf16 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf9,
primals_10, buf16, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf10)
buf11 = buf4
del buf4
buf15 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf10,
primals_12, buf11, buf15, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_12
buf12 = buf10
del buf10
triton_poi_fused_relu_view_3[grid(19200)](buf11, buf12, 19200,
XBLOCK=256, num_warps=4, num_stages=1)
del buf11
buf14 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_14, buf12, reinterpret_tensor(
primals_13, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf14)
del primals_14
return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(
buf2, (64, 400), (400, 1), 0), buf5, reinterpret_tensor(buf9, (64,
400), (400, 1), 0), buf12, primals_13, buf15, primals_11, buf16,
primals_7, buf17, primals_5, buf18)
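# Editor's note: a sketch of what call() computes in eager terms, under the
# shapes asserted above (the helper names below are illustrative only):
#     xu = torch.cat([x, u], -1)                    # (4, 4, 4, 8)
#     q1 = lin3a(relu(lin2a(relu(lin1a(xu)))))      # baseQ1 head
#     q2 = lin3b(relu(lin2b(relu(lin1b(xu)))))      # baseQ2 head
# plus the intermediates that the backward pass will need.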
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class CriticVanillaNew(nn.Module):
"""a vanilla critic module that outputs a node's q-values given only its observation and action(no message between nodes)"""
def __init__(self, state_dim, action_dim):
super(CriticVanillaNew, self).__init__()
self.baseQ1 = MLPBase(state_dim + action_dim, 1)
self.baseQ2 = MLPBase(state_dim + action_dim, 1)
def Q1(self, x, u):
xu = torch.cat([x, u], -1)
x1 = self.baseQ1(xu)
return x1
def forward(self, input_0, input_1):
primals_3 = self.baseQ1.l1.weight
primals_4 = self.baseQ1.l1.bias
primals_5 = self.baseQ1.l2.weight
primals_6 = self.baseQ1.l2.bias
primals_7 = self.baseQ1.l3.weight
primals_8 = self.baseQ1.l3.bias
primals_9 = self.baseQ2.l1.weight
primals_10 = self.baseQ2.l1.bias
primals_11 = self.baseQ2.l2.weight
primals_12 = self.baseQ2.l2.bias
primals_13 = self.baseQ2.l3.weight
primals_14 = self.baseQ2.l3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
| yangfanthu/modular-rl | CriticVanilla | false | 13,136 | [
"BSD-2-Clause"
]
| 0 | 25c599bab641a7e732dbaf116cd240fa2358f113 | https://github.com/yangfanthu/modular-rl/tree/25c599bab641a7e732dbaf116cd240fa2358f113 |
CFRB | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/nr/cnroakuucxovr2wbbiy63dk55fg5zyu3u6ygcqhb7ehcuitmnl6v.py
# Topologically Sorted Source Nodes: [conv2d_1, add, x], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# add => add
# conv2d_1 => convolution_1
# x => gt, mul, where
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_3), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.05), kwargs = {})
# %where : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 819200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 50
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.05
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(in_out_ptr0 + (x3), tmp9, None)
''', device_str='cuda')
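# Editor's note (a sketch): the kernel above is the epilogue shared by the 3x3
# convolutions in this block. It adds the per-channel bias (x1 indexes the 50
# channels of a 64x64 map), adds the residual block input (in_ptr1), and
# applies LeakyReLU with negative slope 0.05: y = t if t > 0 else 0.05 * t.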
# kernel path: runs/run_shard_9/inductor_cache/mq/cmqfuii5lrrx42n457oyub5nudqfwq6fa3n2eu5rxws4jpfb6gdl.py
# Topologically Sorted Source Nodes: [cat, x_4], Original ATen: [aten.cat, aten.leaky_relu]
# Source node to ATen node mapping:
# cat => cat
# x_4 => gt_3, mul_3, where_3
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution, %convolution_2, %convolution_4, %convolution_6], 1), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%cat, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat, 0.05), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %cat, %mul_3), kwargs = {})
triton_poi_fused_cat_leaky_relu_1 = async_compile.triton('triton_poi_fused_cat_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_leaky_relu_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 1638400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 100
x0 = xindex % 4096
x2 = (xindex // 409600)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 25, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (102400*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 50, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + (4096*((-25) + x1)) + (102400*x2)), tmp13, other=0.0)
tmp15 = tl.load(in_ptr3 + ((-25) + x1), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 75, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + (x0 + (4096*((-50) + x1)) + (102400*x2)), tmp22, other=0.0)
tmp24 = tl.load(in_ptr5 + ((-50) + x1), tmp22, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tmp29 = tl.full([1], 100, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tl.load(in_ptr6 + (x0 + (4096*((-75) + x1)) + (102400*x2)), tmp28, other=0.0)
tmp32 = tl.load(in_ptr7 + ((-75) + x1), tmp28, eviction_policy='evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tmp39 = 0.0
tmp40 = tmp38 > tmp39
tmp41 = 0.05
tmp42 = tmp38 * tmp41
tmp43 = tl.where(tmp40, tmp38, tmp42)
tl.store(in_out_ptr0 + (x3), tmp43, None)
''', device_str='cuda')
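# Editor's note (a sketch): this kernel materializes the channel concatenation
# of the four 25-channel branch outputs (d1, d2, d3, x_3 in the source-node
# comments) into a 100-channel tensor, adding each branch's conv bias on the
# fly (channel blocks [0, 25), [25, 50), [50, 75), [75, 100)), then applies
# LeakyReLU(0.05) to the concatenated result.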
# kernel path: runs/run_shard_9/inductor_cache/qt/cqt6k7jvb6a43mqo7gqxy7acaicyy3yhvzlytummkmdqwbvyliwj.py
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# Graph fragment:
# %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 819200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 50
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/36/c36pvwfsscoy2kciztfy556cxjgqfdwssn627inbohwq3umljdgi.py
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x1 => convolution_8
# Graph fragment:
# %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_7, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 196608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 12
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/fu/cfujz3wuioqd7ngkxyhek3tobqstshjil2g7hefqc2j3yuvr2rsz.py
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_9 => convolution_9
# Graph fragment:
# %convolution_9 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_8, %primals_20, %primals_21, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 46128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 961) % 12
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/i7/ci736ek3g3ht7jxvp6kl7zivp56mrcy2prphwwd3mlkkhouwqdjc.py
# Topologically Sorted Source Nodes: [conv2d_10, x2_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_10 => convolution_10
# x2_1 => relu
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 81) % 12
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/gw/cgwa2fue73qo3kqylgd3lw7k2zlau5lhbghsgqy2hfpwmzohwjge.py
# Topologically Sorted Source Nodes: [x2_3], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x2_3 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_6 = async_compile.triton('triton_poi_fused__to_copy_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_6(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
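# Editor's note: the arithmetic above is the source-index computation for
# bilinear upsampling from a 9x9 map to 64x64 with align_corners=False:
# scale = 9 / 64 = 0.140625 and src = floor(max((dst + 0.5) * scale - 0.5, 0)).
# Worked examples:
#     dst = 0  -> max(0.5 * 0.140625 - 0.5, 0) = 0.0       -> source index 0
#     dst = 63 -> 63.5 * 0.140625 - 0.5 = 8.4296875        -> source index 8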
# kernel path: runs/run_shard_9/inductor_cache/oo/cooajaomyurcbolulushutzju7re4egukfnr6ni2d2dy3rlpbqnd.py
# Topologically Sorted Source Nodes: [x2_3], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x2_3 => add_4, clamp_max
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_4, 8), kwargs = {})
triton_poi_fused_add_clamp_7 = async_compile.triton('triton_poi_fused_add_clamp_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_7(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/h4/ch4x5qnvs6ohd3eb33sdszmn5mvzcobv4diiddrco6qruoadqzbi.py
# Topologically Sorted Source Nodes: [x2_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x2_3 => add_3, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul_4, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.140625), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
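# Editor's note (a sketch): this kernel produces the fractional blending weight
# for the same bilinear upsample, w = clamp(src - floor(src), 0, 1), where src
# is the clamped continuous source coordinate computed exactly as in
# triton_poi_fused__to_copy_6 above.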
# kernel path: runs/run_shard_9/inductor_cache/2q/c2qn6yigk4oejj4pzj2jvqaczqcrwbhw6q4ilakwk5g6ehtjtlv3.py
# Topologically Sorted Source Nodes: [conv2d_12, x2_3, conv2d_13, add_3], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# add_3 => add_10
# conv2d_12 => convolution_12
# conv2d_13 => convolution_13
# x2_3 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_7, add_8, add_9, mul_6, mul_7, mul_8, sub_3, sub_4, sub_6
# Graph fragment:
# %convolution_12 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_12, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_12, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_12, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_12, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_6), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_7), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %add_7), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_8), kwargs = {})
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_8, %primals_28, %primals_29, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %convolution_13), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_sub_9 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK : tl.constexpr):
xnumel = 196608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x5 = (xindex // 4096)
x2 = (xindex // 4096) % 12
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr8 + (x6), None)
tmp38 = tl.load(in_ptr9 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (9*tmp4) + (81*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + (9*tmp4) + (81*x5)), None, eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + (9*tmp25) + (81*x5)), None, eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + (9*tmp25) + (81*x5)), None, eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tmp21 + tmp35
tmp39 = tmp37 + tmp38
tmp40 = tmp36 + tmp39
tl.store(in_out_ptr0 + (x6), tmp40, None)
''', device_str='cuda')
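# Editor's note (a sketch): the kernel above performs the entire bilinear
# interpolation in one pass: it gathers the four corner samples of the 9x9 conv
# output through the precomputed int64 index tensors (wrapping negative indices
# by adding 9), lerps horizontally and then vertically with the fractional
# weights, and finally adds the 1x1-conv skip branch (its output in in_ptr8
# plus the bias in in_ptr9).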
# kernel path: runs/run_shard_9/inductor_cache/5d/c5dalygwh4tvvl5f4ui4h6g6bheefeer6yzbzd3alo7rgx4zeufi.py
# Topologically Sorted Source Nodes: [x2_4, sigmoid, x_5], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# x2_4 => convolution_14
# x_5 => mul_9
# Graph fragment:
# %convolution_14 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%add_10, %primals_30, %primals_31, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_14,), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_7, %sigmoid), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_10 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_10(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 819200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 50
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(out_ptr0 + (x3), tmp5, None)
''', device_str='cuda')
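# Editor's note (a sketch): the final kernel computes the attention gate. It
# adds the bias of the 50-channel 1x1 conv, stores that biased pre-sigmoid
# value back in place (it is kept for the backward pass), and writes
# x * sigmoid(gate), i.e. a sigmoid spatial-attention mask modulating the
# conv2d_7 feature map.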
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31 = args
args.clear()
assert_size_stride(primals_1, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_2, (25, ), (1, ))
assert_size_stride(primals_3, (4, 50, 64, 64), (204800, 4096, 64, 1))
assert_size_stride(primals_4, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_5, (50, ), (1, ))
assert_size_stride(primals_6, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_7, (25, ), (1, ))
assert_size_stride(primals_8, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_9, (50, ), (1, ))
assert_size_stride(primals_10, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_11, (25, ), (1, ))
assert_size_stride(primals_12, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_13, (50, ), (1, ))
assert_size_stride(primals_14, (25, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_15, (25, ), (1, ))
assert_size_stride(primals_16, (50, 100, 1, 1), (100, 1, 1, 1))
assert_size_stride(primals_17, (50, ), (1, ))
assert_size_stride(primals_18, (12, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_19, (12, ), (1, ))
assert_size_stride(primals_20, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_21, (12, ), (1, ))
assert_size_stride(primals_22, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_23, (12, ), (1, ))
assert_size_stride(primals_24, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_25, (12, ), (1, ))
assert_size_stride(primals_26, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_27, (12, ), (1, ))
assert_size_stride(primals_28, (12, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_29, (12, ), (1, ))
assert_size_stride(primals_30, (50, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_31, (50, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [d1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 25, 64, 64), (102400, 4096, 64, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, add, x], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_convolution_leaky_relu_0.run(buf2, primals_5, primals_3, 819200, grid=grid(819200), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [d2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 25, 64, 64), (102400, 4096, 64, 1))
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, add_1, x_1], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_0.run(buf5, primals_9, buf2, 819200, grid=grid(819200), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [d3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 25, 64, 64), (102400, 4096, 64, 1))
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf5, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, add_2, x_2], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_0.run(buf8, primals_13, buf5, 819200, grid=grid(819200), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf10 = empty_strided_cuda((4, 100, 64, 64), (409600, 4096, 64, 1), torch.float32)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [cat, x_4], Original ATen: [aten.cat, aten.leaky_relu]
triton_poi_fused_cat_leaky_relu_1.run(buf11, buf0, primals_2, buf3, primals_7, buf6, primals_11, buf9, primals_15, 1638400, grid=grid(1638400), stream=stream0)
del buf0
del buf3
del buf6
del buf9
del primals_11
del primals_15
del primals_2
del primals_7
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf13, primals_17, 819200, grid=grid(819200), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 12, 64, 64), (49152, 4096, 64, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf15, primals_19, 196608, grid=grid(196608), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 12, 31, 31), (11532, 961, 31, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
triton_poi_fused_convolution_4.run(buf17, primals_21, 46128, grid=grid(46128), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.max_pool2d_with_indices]
buf18 = torch.ops.aten.max_pool2d_with_indices.default(buf17, [7, 7], [3, 3])
buf19 = buf18[0]
buf20 = buf18[1]
del buf18
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 12, 9, 9), (972, 81, 9, 1))
buf22 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [conv2d_10, x2_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf22, primals_23, 3888, grid=grid(3888), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf22, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 12, 9, 9), (972, 81, 9, 1))
buf24 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, x2_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf24, primals_25, 3888, grid=grid(3888), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf24, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 12, 9, 9), (972, 81, 9, 1))
buf26 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x2_3], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_6.run(buf26, 64, grid=grid(64), stream=stream0)
buf27 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x2_3], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_7.run(buf27, 64, grid=grid(64), stream=stream0)
buf28 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x2_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_6.run(buf28, 64, grid=grid(64), stream=stream0)
buf29 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x2_3], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_7.run(buf29, 64, grid=grid(64), stream=stream0)
buf30 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x2_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8.run(buf30, 64, grid=grid(64), stream=stream0)
buf32 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x2_3], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8.run(buf32, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf15, primals_28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 12, 64, 64), (49152, 4096, 64, 1))
buf33 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1), torch.float32)
buf35 = buf33; del buf33 # reuse
# Topologically Sorted Source Nodes: [conv2d_12, x2_3, conv2d_13, add_3], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_9.run(buf35, buf26, buf28, buf25, primals_27, buf29, buf30, buf27, buf32, buf34, primals_29, 196608, grid=grid(196608), stream=stream0)
del buf25
del buf34
del primals_27
del primals_29
# Topologically Sorted Source Nodes: [x2_4], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf37 = buf36; del buf36 # reuse
buf38 = empty_strided_cuda((4, 50, 64, 64), (204800, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x2_4, sigmoid, x_5], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
triton_poi_fused_convolution_mul_sigmoid_10.run(buf37, primals_31, buf13, buf38, 819200, grid=grid(819200), stream=stream0)
del primals_31
return (buf38, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, buf2, buf5, buf8, buf11, buf13, buf15, buf17, buf19, buf20, buf22, buf24, buf26, buf27, buf28, buf29, buf30, buf32, buf35, buf37, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((25, 50, 1, 1), (50, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((25, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 50, 64, 64), (204800, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((50, 50, 3, 3), (450, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((25, 50, 1, 1), (50, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((25, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((50, 50, 3, 3), (450, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((25, 50, 1, 1), (50, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((25, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((50, 50, 3, 3), (450, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((25, 50, 3, 3), (450, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((25, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((50, 100, 1, 1), (100, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((12, 50, 1, 1), (50, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((12, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((12, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((12, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((12, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((12, 12, 1, 1), (12, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((50, 12, 1, 1), (12, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
def sequential(*args):
"""Advanced nn.Sequential.
Args:
nn.Sequential, nn.Module
Returns:
nn.Sequential
"""
if len(args) == 1:
if isinstance(args[0], OrderedDict):
raise NotImplementedError(
'sequential does not support OrderedDict input.')
return args[0]
modules = []
for module in args:
if isinstance(module, nn.Sequential):
for submodule in module.children():
modules.append(submodule)
elif isinstance(module, nn.Module):
modules.append(module)
return nn.Sequential(*modules)
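# A minimal usage sketch (hypothetical helper, not part of the original
# module): sequential() unpacks nested containers, so the three layers below
# end up as direct children of a single nn.Sequential.
def _sequential_demo():
    inner = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU())
    flat = sequential(inner, nn.Conv2d(8, 3, 3))
    assert len(flat) == 3  # the inner container was unpacked
    return flat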
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=
1, bias=True, mode='CBR', negative_slope=0.2):
L = []
for t in mode:
if t == 'C':
L.append(nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, bias=bias))
elif t == 'T':
L.append(nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=kernel_size, stride=
stride, padding=padding, bias=bias))
elif t == 'B':
L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=0.0001,
affine=True))
elif t == 'I':
L.append(nn.InstanceNorm2d(out_channels, affine=True))
elif t == 'R':
L.append(nn.ReLU(inplace=True))
elif t == 'r':
L.append(nn.ReLU(inplace=False))
elif t == 'L':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True))
elif t == 'l':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False)
)
elif t == '2':
L.append(nn.PixelShuffle(upscale_factor=2))
elif t == '3':
L.append(nn.PixelShuffle(upscale_factor=3))
elif t == '4':
L.append(nn.PixelShuffle(upscale_factor=4))
elif t == 'U':
L.append(nn.Upsample(scale_factor=2, mode='nearest'))
elif t == 'u':
L.append(nn.Upsample(scale_factor=3, mode='nearest'))
elif t == 'v':
L.append(nn.Upsample(scale_factor=4, mode='nearest'))
elif t == 'M':
L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
elif t == 'A':
L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
else:
            raise NotImplementedError('Undefined type: {}'.format(t))
return sequential(*L)
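# Usage sketch (hypothetical helper, not part of the original file): the mode
# string drives construction, so 'CL' yields Conv2d followed by LeakyReLU,
# the combination CFRB uses for its residual branches.
def _conv_demo():
    block = conv(50, 50, kernel_size=3, mode='CL', negative_slope=0.05)
    assert isinstance(block[0], nn.Conv2d)
    assert isinstance(block[1], nn.LeakyReLU)
    return block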
class ESA(nn.Module):
def __init__(self, channel=64, reduction=4, bias=True):
super(ESA, self).__init__()
self.r_nc = channel // reduction
self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=
2, padding=0)
self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x2 = F.max_pool2d(self.conv2(x1), kernel_size=7, stride=3)
x2 = self.relu(self.conv3(x2))
x2 = self.relu(self.conv4(x2))
x2 = F.interpolate(self.conv5(x2), (x.size(2), x.size(3)), mode=
'bilinear', align_corners=False)
x2 = self.conv6(x2 + self.conv21(x1))
return x.mul(self.sigmoid(x2))
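# Shape walk-through for ESA on a 64x64 input (matching the (4, 12, 31, 31)
# and (4, 12, 9, 9) buffers asserted in the compiled call() listings):
# conv2 with stride 2 and no padding maps 64 -> 31, max_pool2d(kernel_size=7,
# stride=3) maps 31 -> 9, and F.interpolate restores 9 -> 64 before the
# sigmoid gate.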
class CFRB(nn.Module):
def __init__(self, in_channels=50, out_channels=50, kernel_size=3,
stride=1, padding=1, bias=True, mode='CL', d_rate=0.5,
negative_slope=0.05):
super(CFRB, self).__init__()
self.d_nc = int(in_channels * d_rate)
self.r_nc = in_channels
assert mode[0] == 'C', 'convolutional layer first'
self.conv1_d = conv(in_channels, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv1x1 = conv(self.d_nc * 4, out_channels, kernel_size=1,
stride=1, padding=0, bias=bias, mode=mode[0])
self.act = conv(mode=mode[-1], negative_slope=negative_slope)
self.esa = ESA(in_channels, reduction=4, bias=True)
def forward(self, x):
d1 = self.conv1_d(x)
x = self.act(self.conv1_r(x) + x)
d2 = self.conv2_d(x)
x = self.act(self.conv2_r(x) + x)
d3 = self.conv3_d(x)
x = self.act(self.conv3_r(x) + x)
x = self.conv4_d(x)
x = self.act(torch.cat([d1, d2, d3, x], dim=1))
x = self.esa(self.conv1x1(x))
return x
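# Channel bookkeeping for the defaults (in_channels=50, d_rate=0.5): each
# conv*_d branch distils 25 channels, torch.cat yields 4 * 25 = 100 channels,
# and conv1x1 projects back to out_channels=50 before the ESA gate.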
def get_inputs():
return [torch.rand([4, 50, 64, 64])]
def get_init_inputs():
return [[], {}]
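# A minimal eager-mode smoke test (hypothetical helper, not part of the
# original file), using the same shapes get_inputs() provides.
def _cfrb_smoke_test():
    m = CFRB()
    y = m(get_inputs()[0])
    assert y.shape == (4, 50, 64, 64)
    return y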
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
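# Fuses the conv bias add, the residual add (conv(x) + x), and LeakyReLU with
# negative_slope=0.05, covering the three conv*_r branches of CFRB.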
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 50
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.05
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(in_out_ptr0 + x3, tmp9, None)
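# Concatenates the four distilled 25-channel branches (d1, d2, d3 and the
# conv4_d output) into 100 channels, adding each branch's conv bias, then
# applies LeakyReLU with negative_slope=0.05.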
@triton.jit
def triton_poi_fused_cat_leaky_relu_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 100
x0 = xindex % 4096
x2 = xindex // 409600
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 25, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 102400 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 50, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + 4096 * (-25 + x1) + 102400 * x2), tmp13,
other=0.0)
tmp15 = tl.load(in_ptr3 + (-25 + x1), tmp13, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 75, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + (x0 + 4096 * (-50 + x1) + 102400 * x2), tmp22,
other=0.0)
tmp24 = tl.load(in_ptr5 + (-50 + x1), tmp22, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tl.full([1], 100, tl.int64)
tmp31 = tl.load(in_ptr6 + (x0 + 4096 * (-75 + x1) + 102400 * x2), tmp28,
other=0.0)
tmp32 = tl.load(in_ptr7 + (-75 + x1), tmp28, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tmp39 = 0.0
tmp40 = tmp38 > tmp39
tmp41 = 0.05
tmp42 = tmp38 * tmp41
tmp43 = tl.where(tmp40, tmp38, tmp42)
tl.store(in_out_ptr0 + x3, tmp43, None)
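# The next three kernels are plain conv bias adds: 50 channels at 64x64
# (conv1x1), 12 channels at 64x64 (ESA conv1), and 12 channels at the strided
# 31x31 resolution (ESA conv2, masked since 46128 is not a multiple of the
# block size).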
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 50
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 12
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 46128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 961 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
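# Conv bias add fused with ReLU at the pooled 9x9 resolution, used for ESA's
# conv3 and conv4.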
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
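# Low-side source index for bilinear upsampling from 9x9 to 64x64; 0.140625
# is the scale factor 9/64 under align_corners=False.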
@triton.jit
def triton_poi_fused__to_copy_6(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
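# High-side source index for the same upsample: low index + 1, clamped to 8
# (the last valid row/column of the 9x9 source).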
@triton.jit
def triton_poi_fused_add_clamp_7(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
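# Fractional blend weight for the upsample, clamped to [0, 1].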
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
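# Bilinear upsample of the 9x9 conv5 output to 64x64: gathers the four
# neighbours with the precomputed indices and weights, adds the conv5 bias,
# then adds the conv21(x1) skip branch plus its bias to form the conv6 input.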
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_9(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x5 = xindex // 4096
x2 = xindex // 4096 % 12
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr8 + x6, None)
tmp38 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 9 * tmp4 + 81 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 9 * tmp4 + 81 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 9 * tmp25 + 81 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 9 * tmp25 + 81 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tmp21 + tmp35
tmp39 = tmp37 + tmp38
tmp40 = tmp36 + tmp39
tl.store(in_out_ptr0 + x6, tmp40, None)
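# Adds the conv6 bias in place (the pre-sigmoid logits are kept for backward)
# and writes x * sigmoid(logits), the ESA-gated block output.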
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_10(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 50
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
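# Unannotated counterpart of the call() in the first listing: extern
# convolution kernels interleaved with the fused Triton kernels defined above.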
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31) = args
args.clear()
assert_size_stride(primals_1, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_2, (25,), (1,))
assert_size_stride(primals_3, (4, 50, 64, 64), (204800, 4096, 64, 1))
assert_size_stride(primals_4, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_7, (25,), (1,))
assert_size_stride(primals_8, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_9, (50,), (1,))
assert_size_stride(primals_10, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_11, (25,), (1,))
assert_size_stride(primals_12, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_13, (50,), (1,))
assert_size_stride(primals_14, (25, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_15, (25,), (1,))
assert_size_stride(primals_16, (50, 100, 1, 1), (100, 1, 1, 1))
assert_size_stride(primals_17, (50,), (1,))
assert_size_stride(primals_18, (12, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_19, (12,), (1,))
assert_size_stride(primals_20, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_21, (12,), (1,))
assert_size_stride(primals_22, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_23, (12,), (1,))
assert_size_stride(primals_24, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_25, (12,), (1,))
assert_size_stride(primals_26, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_27, (12,), (1,))
assert_size_stride(primals_28, (12, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_29, (12,), (1,))
assert_size_stride(primals_30, (50, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_31, (50,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf2,
primals_5, primals_3, 819200, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf4 = extern_kernels.convolution(buf2, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf5,
primals_9, buf2, 819200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf6 = extern_kernels.convolution(buf5, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf7 = extern_kernels.convolution(buf5, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf8 = buf7
del buf7
triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf8,
primals_13, buf5, 819200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf9 = extern_kernels.convolution(buf8, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf10 = empty_strided_cuda((4, 100, 64, 64), (409600, 4096, 64, 1),
torch.float32)
buf11 = buf10
del buf10
triton_poi_fused_cat_leaky_relu_1[grid(1638400)](buf11, buf0,
primals_2, buf3, primals_7, buf6, primals_11, buf9, primals_15,
1638400, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del buf3
del buf6
del buf9
del primals_11
del primals_15
del primals_2
del primals_7
buf12 = extern_kernels.convolution(buf11, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_2[grid(819200)](buf13, primals_17,
819200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf14 = extern_kernels.convolution(buf13, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 12, 64, 64), (49152, 4096, 64, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_3[grid(196608)](buf15, primals_19,
196608, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf16 = extern_kernels.convolution(buf15, primals_20, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 12, 31, 31), (11532, 961, 31, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_4[grid(46128)](buf17, primals_21,
46128, XBLOCK=512, num_warps=4, num_stages=1)
del primals_21
buf18 = torch.ops.aten.max_pool2d_with_indices.default(buf17, [7, 7
], [3, 3])
buf19 = buf18[0]
buf20 = buf18[1]
del buf18
buf21 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 12, 9, 9), (972, 81, 9, 1))
buf22 = buf21
del buf21
triton_poi_fused_convolution_relu_5[grid(3888)](buf22, primals_23,
3888, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf23 = extern_kernels.convolution(buf22, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 12, 9, 9), (972, 81, 9, 1))
buf24 = buf23
del buf23
triton_poi_fused_convolution_relu_5[grid(3888)](buf24, primals_25,
3888, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf25 = extern_kernels.convolution(buf24, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 12, 9, 9), (972, 81, 9, 1))
buf26 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_6[grid(64)](buf26, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_7[grid(64)](buf27, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_6[grid(64)](buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_7[grid(64)](buf29, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8[grid(64)](buf30,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf32 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8[grid(64)](buf32,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf34 = extern_kernels.convolution(buf15, primals_28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 12, 64, 64), (49152, 4096, 64, 1))
buf33 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1),
torch.float32)
buf35 = buf33
del buf33
triton_poi_fused__unsafe_index_add_convolution_mul_sub_9[grid(196608)](
buf35, buf26, buf28, buf25, primals_27, buf29, buf30, buf27,
buf32, buf34, primals_29, 196608, XBLOCK=512, num_warps=8,
num_stages=1)
del buf25
del buf34
del primals_27
del primals_29
buf36 = extern_kernels.convolution(buf35, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf37 = buf36
del buf36
buf38 = empty_strided_cuda((4, 50, 64, 64), (204800, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_mul_sigmoid_10[grid(819200)](buf37,
primals_31, buf13, buf38, 819200, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_31
return (buf38, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, buf2, buf5, buf8, buf11, buf13, buf15, buf17, buf19,
buf20, buf22, buf24, buf26, buf27, buf28, buf29, buf30, buf32,
buf35, buf37)
def sequential(*args):
"""Advanced nn.Sequential.
Args:
nn.Sequential, nn.Module
Returns:
nn.Sequential
"""
if len(args) == 1:
if isinstance(args[0], OrderedDict):
raise NotImplementedError(
'sequential does not support OrderedDict input.')
return args[0]
modules = []
for module in args:
if isinstance(module, nn.Sequential):
for submodule in module.children():
modules.append(submodule)
elif isinstance(module, nn.Module):
modules.append(module)
return nn.Sequential(*modules)
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=
1, bias=True, mode='CBR', negative_slope=0.2):
L = []
for t in mode:
if t == 'C':
L.append(nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, bias=bias))
elif t == 'T':
L.append(nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=kernel_size, stride=
stride, padding=padding, bias=bias))
elif t == 'B':
L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=0.0001,
affine=True))
elif t == 'I':
L.append(nn.InstanceNorm2d(out_channels, affine=True))
elif t == 'R':
L.append(nn.ReLU(inplace=True))
elif t == 'r':
L.append(nn.ReLU(inplace=False))
elif t == 'L':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True))
elif t == 'l':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False)
)
elif t == '2':
L.append(nn.PixelShuffle(upscale_factor=2))
elif t == '3':
L.append(nn.PixelShuffle(upscale_factor=3))
elif t == '4':
L.append(nn.PixelShuffle(upscale_factor=4))
elif t == 'U':
L.append(nn.Upsample(scale_factor=2, mode='nearest'))
elif t == 'u':
L.append(nn.Upsample(scale_factor=3, mode='nearest'))
elif t == 'v':
L.append(nn.Upsample(scale_factor=4, mode='nearest'))
elif t == 'M':
L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
elif t == 'A':
L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
else:
            raise NotImplementedError('Undefined type: {}'.format(t))
return sequential(*L)
class ESA(nn.Module):
def __init__(self, channel=64, reduction=4, bias=True):
super(ESA, self).__init__()
self.r_nc = channel // reduction
self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=
2, padding=0)
self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x2 = F.max_pool2d(self.conv2(x1), kernel_size=7, stride=3)
x2 = self.relu(self.conv3(x2))
x2 = self.relu(self.conv4(x2))
x2 = F.interpolate(self.conv5(x2), (x.size(2), x.size(3)), mode=
'bilinear', align_corners=False)
x2 = self.conv6(x2 + self.conv21(x1))
return x.mul(self.sigmoid(x2))
class CFRBNew(nn.Module):
def __init__(self, in_channels=50, out_channels=50, kernel_size=3,
stride=1, padding=1, bias=True, mode='CL', d_rate=0.5,
negative_slope=0.05):
super(CFRBNew, self).__init__()
self.d_nc = int(in_channels * d_rate)
self.r_nc = in_channels
assert mode[0] == 'C', 'convolutional layer first'
self.conv1_d = conv(in_channels, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv1x1 = conv(self.d_nc * 4, out_channels, kernel_size=1,
stride=1, padding=0, bias=bias, mode=mode[0])
self.act = conv(mode=mode[-1], negative_slope=negative_slope)
self.esa = ESA(in_channels, reduction=4, bias=True)
def forward(self, input_0):
primals_1 = self.conv1_d.weight
primals_2 = self.conv1_d.bias
primals_4 = self.conv1_r.weight
primals_5 = self.conv1_r.bias
primals_6 = self.conv2_d.weight
primals_7 = self.conv2_d.bias
primals_8 = self.conv2_r.weight
primals_9 = self.conv2_r.bias
primals_10 = self.conv3_d.weight
primals_11 = self.conv3_d.bias
primals_12 = self.conv3_r.weight
primals_13 = self.conv3_r.bias
primals_14 = self.conv4_d.weight
primals_15 = self.conv4_d.bias
primals_16 = self.conv1x1.weight
primals_17 = self.conv1x1.bias
primals_18 = self.esa.conv1.weight
primals_19 = self.esa.conv1.bias
        # Primal numbering follows the parameter-access order of the traced
        # graph, matching the convolutions in call(): conv2 -> 20/21,
        # conv3 -> 22/23, conv4 -> 24/25, conv5 -> 26/27, conv21 -> 28/29.
        primals_20 = self.esa.conv2.weight
        primals_21 = self.esa.conv2.bias
        primals_22 = self.esa.conv3.weight
        primals_23 = self.esa.conv3.bias
        primals_24 = self.esa.conv4.weight
        primals_25 = self.esa.conv4.bias
        primals_26 = self.esa.conv5.weight
        primals_27 = self.esa.conv5.bias
        primals_28 = self.esa.conv21.weight
        primals_29 = self.esa.conv21.bias
primals_30 = self.esa.conv6.weight
primals_31 = self.esa.conv6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31])
return output[0]
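# A minimal usage sketch for the compiled wrapper (hypothetical helper, not
# part of the generated file): CFRBNew routes forward() through call(), whose
# asserts require contiguous float32 inputs on cuda:0.
def _cfrb_new_smoke_test():
    m = CFRBNew().cuda()
    x = torch.rand([4, 50, 64, 64], device='cuda:0')
    y = m(x)
    assert y.shape == (4, 50, 64, 64)
    return y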
| wwjfsfs/wwjyyds | CFRB | false | 13,137 | ["MIT"] | 0 | 80cd6267fde7cd98838078a0d5178a557ceb7414 | https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414 |
FFN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
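# Note: despite its name, triton_poi_fused_convolution_0 performs no
# convolution itself; it materializes the (B, L, C) -> (B, C, L) permute of
# the input so that the following Conv1d can run on a contiguous buffer.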
# kernel path: runs/run_shard_9/inductor_cache/37/c37iajzamvg4r3s5ikb4y6kka2x3towdlz4bqoh3dx4uywvya2mb.py
# Topologically Sorted Source Nodes: [x_1, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# relu => relu
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/lf/clf7hs52i4bd5d3e73uio27ntyjfqmszkbsw6dta3r6rzgeftva3.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_2 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/tr/ctrdeeo45yfmpbksxog7is2d6fd26mv2poki6u26emzhamo2zqxd.py
# Topologically Sorted Source Nodes: [x_4, x_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_4 => add
# x_6 => clone_1, var_mean
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {})
# %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_1, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/px/cpxbmtafvoqnd5j3oyskd4thxpat5nbj25jgagf6an6xgvaf47sv.py
# Topologically Sorted Source Nodes: [x_4, x_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_4 => add
# x_6 => add_1, add_2, clone_1, mul, mul_1, rsqrt, sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {})
# %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y3), ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2 + (4*y3)), tmp13, xmask & ymask)
''', device_str='cuda')
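# Hedged sketch of the pointwise normalization the kernel above applies per
# element, given the mean/variance buffers from the reduction kernel; here x
# stands for the residual sum, and the helper name is illustrative.
def _layer_norm_apply_sketch(x, mean, var, weight, bias, eps=1e-05):
    return (x - mean) * torch.rsqrt(var + eps) * weight + bias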
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 4), (64, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf4, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_4, x_6], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_3.run(buf4, primals_1, buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_4, x_6], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(buf4, primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
del buf5
del buf6
del primals_7
return (buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16, 1), (16, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch as t
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
        :param bias: boolean. If True, a bias term is included.
        :param w_init: str. Nonlinearity name used to compute the gain for Xavier initialization.
"""
super(Conv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
            kernel_size=kernel_size, stride=stride, padding=padding,
            dilation=dilation, bias=bias)
        nn.init.xavier_uniform_(self.conv.weight,
            gain=nn.init.calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class FFN(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
:param num_hidden: dimension of hidden
"""
super(FFN, self).__init__()
        self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1,
            w_init='relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_):
x = input_.transpose(1, 2)
x = self.w_2(t.relu(self.w_1(x)))
x = x.transpose(1, 2)
x = x + input_
x = self.dropout(x)
x = self.layer_norm(x)
return x
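# Hedged usage sketch (shapes follow get_inputs/get_init_inputs below; the
# helper name is illustrative, not part of the original module):
def _ffn_example():
    ffn = FFN(num_hidden=4)
    x = torch.rand(4, 4, 4)  # (batch, time, hidden)
    return ffn(x)  # same shape: conv FFN output + residual, then LayerNorm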
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_hidden': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask,
        eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 4), (64, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(16)](buf4, primals_1,
buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf4,
primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf5
del buf6
del primals_7
return buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
        :param bias: boolean. If True, a bias term is included.
        :param w_init: str. Nonlinearity name used to compute the gain for Xavier initialization.
"""
super(Conv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
            kernel_size=kernel_size, stride=stride, padding=padding,
            dilation=dilation, bias=bias)
        nn.init.xavier_uniform_(self.conv.weight,
            gain=nn.init.calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class FFNNew(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
:param num_hidden: dimension of hidden
"""
super(FFNNew, self).__init__()
        self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1,
            w_init='relu')
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_0):
primals_2 = self.w_1.conv.weight
primals_3 = self.w_1.conv.bias
primals_4 = self.w_2.conv.weight
primals_5 = self.w_2.conv.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| yhgon/Transformer-TTS | FFN | false | 13,138 | ["MIT"] | 0 | 5f34945cb5500d484275700c4e393ed125d5e753 | https://github.com/yhgon/Transformer-TTS/tree/5f34945cb5500d484275700c4e393ed125d5e753 |
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/nh/cnhx37tsffx4r7taj3xi72s7yfpnnccem24fupfbht6b7bzliavu.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_2 => add, erf, mul, mul_1, mul_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_gelu_0 = async_compile.triton('triton_poi_fused_gelu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
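# Hedged reference for the kernel above: it computes the exact (erf-based)
# GELU, gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))). An eager-mode sketch with
# an illustrative helper name:
def _gelu_sketch(x):
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))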
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.gelu]
stream0 = get_raw_stream(0)
triton_poi_fused_gelu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.autograd
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, n_in, n_out, dropout=0):
super().__init__()
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.GELU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout(x)
x = self.linear(x)
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3,
            reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0
class MLPNew(nn.Module):
def __init__(self, n_in, n_out, dropout=0):
super().__init__()
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.GELU()
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| yifding/W2NER | MLP | false | 13,139 | ["MIT"] | 0 | d13128e45f3930a8b8faa794318939dc90a75974 | https://github.com/yifding/W2NER/tree/d13128e45f3930a8b8faa794318939dc90a75974 |
ActorDownAction | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/cz/cczdmlbbmfz5zwvgavqgsp7p2chtvjm2zzbxhjk7w5jaagtfot3j.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%tanh, %tanh_1], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tmp10 = tl.full([1], 8, tl.int64)
tmp11 = tmp0 < tmp10
tmp12 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = libdevice.tanh(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
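# Hedged reference for the kernel above (eager-mode sketch, illustrative
# name): it fuses the module's torch.cat((x, m), dim=-1) with the following
# tanh, reading channels 0-3 from in_ptr0 and channels 4-7 from in_ptr1.
def _cat_tanh_sketch(x, m):
    return torch.tanh(torch.cat((x, m), dim=-1))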
# kernel path: runs/run_shard_9/inductor_cache/m6/cm6ozsdmt5vl54fxwk7cgktzswysgn2c37vsaybpucplzehkrnnz.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
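# Hedged sketch (illustrative name): the kernel fuses the bias add with ReLU
# and also stores the (activation <= 0) mask that autograd's
# threshold_backward pass will reuse.
def _relu_with_mask_sketch(x, bias):
    y = torch.relu(x + bias)
    return y, y <= 0.0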
# kernel path: runs/run_shard_9/inductor_cache/64/c64g5uxk2a5hbzuhd6oikla2gb5eyfjjb6kbh7btzswha52gl5ex.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = (xindex // 1200)
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4h/c4h6r6vefoeuinm5eqv2d6wqmfj2mnjacalp633y3m6bnseb2bnk.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.view]
# Source node to ATen node mapping:
# x_1 => relu_1
# x_2 => view_4
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {})
triton_poi_fused_relu_view_3 = async_compile.triton('triton_poi_fused_relu_view_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = (xindex // 300)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/yl/cyl3pepuyms7had2pbaeyfxi5l2wn7v5miawhd67mloipgsmgxwf.py
# Topologically Sorted Source Nodes: [tanh_1, action], Original ATen: [aten.tanh, aten.mul]
# Source node to ATen node mapping:
# action => mul
# tanh_1 => tanh_2
# Graph fragment:
# %tanh_2 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, 4), kwargs = {})
triton_poi_fused_mul_tanh_4 = async_compile.triton('triton_poi_fused_mul_tanh_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_tanh_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
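# Hedged sketch (illustrative name): eager equivalent of the kernel above;
# the constant 4.0 is the max_action value baked in from get_init_inputs.
def _scaled_tanh_sketch(x):
    return 4.0 * torch.tanh(x)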
# kernel path: runs/run_shard_9/inductor_cache/tc/ctcyhc3j7gcndovmump6plsamm3m4336gv7k75wru64k3klwdqb5.py
# Topologically Sorted Source Nodes: [msg_down], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# msg_down => div, pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_11, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, %expand), kwargs = {})
triton_per_fused_div_linalg_vector_norm_5 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp9, xmask)
''', device_str='cuda')
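# Hedged reference for the reduction above (eager-mode sketch, illustrative
# name): it matches F.normalize(x, dim=-1), dividing by the L2 norm clamped
# below at eps = 1e-12.
def _l2_normalize_sketch(x):
    norm = x.pow(2).sum(dim=-1, keepdim=True).sqrt().clamp_min(1e-12)
    return x / norm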
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (400, 8), (8, 1))
assert_size_stride(primals_4, (400, ), (1, ))
assert_size_stride(primals_5, (300, 400), (400, 1))
assert_size_stride(primals_6, (300, ), (1, ))
assert_size_stride(primals_7, (4, 300), (300, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (400, 8), (8, 1))
assert_size_stride(primals_10, (400, ), (1, ))
assert_size_stride(primals_11, (300, 400), (400, 1))
assert_size_stride(primals_12, (300, ), (1, ))
assert_size_stride(primals_13, (16, 300), (300, 1))
assert_size_stride(primals_14, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf1 # reuse
buf20 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_4, buf20, 25600, grid=grid(25600), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0), reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf3, primals_6, buf4, buf19, 19200, grid=grid(19200), stream=stream0)
del primals_6
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_3.run(buf4, buf5, 19200, grid=grid(19200), stream=stream0)
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf5, reinterpret_tensor(primals_7, (300, 4), (1, 300), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh_1, action], Original ATen: [aten.tanh, aten.mul]
triton_poi_fused_mul_tanh_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf8)
del primals_9
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf8 # reuse
buf18 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf9, primals_10, buf18, 25600, grid=grid(25600), stream=stream0)
del primals_10
buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0), reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf10)
buf11 = buf4; del buf4 # reuse
buf17 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf10, primals_12, buf11, buf17, 19200, grid=grid(19200), stream=stream0)
del primals_12
buf12 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_3.run(buf11, buf12, 19200, grid=grid(19200), stream=stream0)
del buf11
buf13 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, buf12, reinterpret_tensor(primals_13, (300, 16), (1, 300), 0), alpha=1, beta=1, out=buf13)
del primals_14
buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf15 = reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf14 # reuse
buf16 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [msg_down], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_per_fused_div_linalg_vector_norm_5.run(buf15, buf13, buf16, 64, 16, grid=grid(64), stream=stream0)
return (buf7, buf16, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(buf2, (64, 400), (400, 1), 0), buf5, buf6, reinterpret_tensor(buf9, (64, 400), (400, 1), 0), buf12, buf13, buf15, primals_13, buf17, primals_11, buf18, primals_7, buf19, primals_5, buf20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((16, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class ActorDownAction(nn.Module):
"""a top-down module used in bothway message passing that passes messages to children and outputs action"""
def __init__(self, self_input_dim, action_dim, msg_dim, max_action,
max_children):
super(ActorDownAction, self).__init__()
self.max_action = max_action
self.action_base = MLPBase(self_input_dim + msg_dim, action_dim)
        self.msg_base = MLPBase(self_input_dim + msg_dim,
            msg_dim * max_children)
def forward(self, x, m):
xm = torch.cat((x, m), dim=-1)
xm = torch.tanh(xm)
action = self.max_action * torch.tanh(self.action_base(xm))
msg_down = self.msg_base(xm)
msg_down = F.normalize(msg_down, dim=-1)
return action, msg_down
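# Hedged usage sketch (arguments follow get_init_inputs/get_inputs below; the
# helper name is illustrative, not part of the original module):
def _actor_down_action_example():
    actor = ActorDownAction(self_input_dim=4, action_dim=4, msg_dim=4,
        max_action=4, max_children=4)
    x, m = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    return actor(x, m)  # (action, msg_down); msg_down is L2-normalized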
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'self_input_dim': 4, 'action_dim': 4, 'msg_dim': 4,
'max_action': 4, 'max_children': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp12 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = libdevice.tanh(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_tanh_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (400, 8), (8, 1))
assert_size_stride(primals_4, (400,), (1,))
assert_size_stride(primals_5, (300, 400), (400, 1))
assert_size_stride(primals_6, (300,), (1,))
assert_size_stride(primals_7, (4, 300), (300, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (400, 8), (8, 1))
assert_size_stride(primals_10, (400,), (1,))
assert_size_stride(primals_11, (300, 400), (400, 1))
assert_size_stride(primals_12, (300,), (1,))
assert_size_stride(primals_13, (16, 300), (300, 1))
assert_size_stride(primals_14, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1)
del primals_3
        buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0)
del buf1
buf20 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf2,
primals_4, buf20, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf3,
primals_6, buf4, buf19, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_6
buf5 = buf3
del buf3
        triton_poi_fused_relu_view_3[grid(19200)](buf4, buf5, 19200,
            XBLOCK=256, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf5, reinterpret_tensor(primals_7,
(300, 4), (1, 300), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_4[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf8)
del primals_9
        buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0)
del buf8
buf18 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf9,
primals_10, buf18, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf10)
buf11 = buf4
del buf4
buf17 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf10,
primals_12, buf11, buf17, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_12
buf12 = buf10
del buf10
triton_poi_fused_relu_view_3[grid(19200)](buf11, buf12, 19200,
XBLOCK=256, num_warps=4, num_stages=1)
del buf11
buf13 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_14, buf12, reinterpret_tensor(
primals_13, (300, 16), (1, 300), 0), alpha=1, beta=1, out=buf13)
del primals_14
buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf15 = reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf14
        buf16 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
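        # Message head output: per-row L2 norm (buf15) followed by division,
        # i.e. the 16-dim outgoing message divided by its own vector norm.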
triton_per_fused_div_linalg_vector_norm_5[grid(64)](buf15, buf13,
buf16, 64, 16, XBLOCK=32, num_warps=4, num_stages=1)
return (buf7, buf16, reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(buf2, (64, 400), (400, 1), 0), buf5, buf6,
reinterpret_tensor(buf9, (64, 400), (400, 1), 0), buf12, buf13,
buf15, primals_13, buf17, primals_11, buf18, primals_7, buf19,
primals_5, buf20)
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class ActorDownActionNew(nn.Module):
"""a top-down module used in bothway message passing that passes messages to children and outputs action"""
def __init__(self, self_input_dim, action_dim, msg_dim, max_action,
max_children):
super(ActorDownActionNew, self).__init__()
self.max_action = max_action
self.action_base = MLPBase(self_input_dim + msg_dim, action_dim)
self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim *
max_children)
def forward(self, input_0, input_1):
primals_3 = self.action_base.l1.weight
primals_4 = self.action_base.l1.bias
primals_5 = self.action_base.l2.weight
primals_6 = self.action_base.l2.bias
primals_7 = self.action_base.l3.weight
primals_8 = self.action_base.l3.bias
primals_9 = self.msg_base.l1.weight
primals_10 = self.msg_base.l1.bias
primals_11 = self.msg_base.l2.weight
primals_12 = self.msg_base.l2.bias
primals_13 = self.msg_base.l3.weight
primals_14 = self.msg_base.l3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
| yangfanthu/modular-rl | ActorDownAction | false | 13,140 | ["BSD-2-Clause"] | 0 | 25c599bab641a7e732dbaf116cd240fa2358f113 | https://github.com/yangfanthu/modular-rl/tree/25c599bab641a7e732dbaf116cd240fa2358f113 |
Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/f2/cf23blhmrqff3yhtwgcpt4rhmfkv3i25ry4dtkijev5qamd7hqxq.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
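# Eager-mode sketch of the fused statistics (illustration only, assuming
# x = primals_3 of shape (4, 4, 4)):
#   mean = x.mean(dim=2, keepdim=True)
#   rstd = torch.rsqrt(x.var(dim=2, unbiased=False, keepdim=True) + 1e-06)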
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
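# Eager-mode sketch (illustration only): the full affine LayerNorm
#   y = (x - mean) * rstd * weight + bias
# with weight = primals_1 and bias = primals_2.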
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/3r/c3rfy3ljjc2bfodnr5gm65jr7ew6v6kno6w6jzahlupuqxbpvfkw.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
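# These clone kernels unpack the fused qkv projection: the linear produces a
# (B*N, 12) buffer with q at channel offset 0, k at offset 4 and v at offset 8
# (see the +4 / +8 offsets in triton_poi_fused_clone_3 / _6 below), each
# reshaped to (B, heads, N, head_dim).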
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/aw/cawvwx3nv7ipnpnf2hcgwz5usu7vsw5yynj5ofrunhktjwqff5vq.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/p5/cp5wuljbdcz2dl2xvl4imkn5wmtmrnbb7mnld5glztiqavldlheh.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
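# Eager-mode sketch of this numerically stable softmax stage (illustration
# only):
#   attn = (attn * scale) - (attn * scale).amax(dim=-1, keepdim=True)
#   attn = attn.exp()
# The scale is folded in as a multiply by 1.0 here because
# head_dim = dim // num_heads = 1, so head_dim ** -0.5 == 1.0.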
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
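# Eager-mode sketch (illustration only): the normalization stage of softmax,
#   attn = attn / attn.sum(dim=-1, keepdim=True)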
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/vv/cvvhis67uzj3m3ebbd4sgghaemqhihabasphltk5wytqdd6fe74t.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul_1 => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/lw/clwfsjrjxeb2gmxy5p3lplvcrvrn37iuw4atjria32bxp2jajrtc.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format})
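# Undoes the head split: (attn @ v) of shape (B, heads, N, head_dim) is
# transposed back to (B, N, heads * head_dim) before the output projection,
# matching .transpose(1, 2).reshape(B, N, C) in the module source.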
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5y/c5yhyv7emyc7i2ozpvns6tsiqcvdzktqqpohy4sedfe7aihkojch.py
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => var_mean_1
# x_1 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
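# Eager-mode sketch (illustration only): form the residual
#   h = x + (attn_proj + proj_bias)
# then compute its per-row mean and biased variance for the second LayerNorm.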
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/wk/cwkvd3urdgah2mwiynddchgrzzinh7dhvvfxtkikqtjcxphriloz.py
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2
# x_1 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_7), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_8), kwargs = {})
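# Eager-mode sketch (illustration only):
#   h = x + (attn_proj + proj_bias)
#   y = (h - mean) * torch.rsqrt(var + 1e-06) * weight + bias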
triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/b4/cb43jhxvcrefkhdp7ixdoh6nmvez5h55vhlzkxtasuovu5ru7pe5.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_5 => add_6, erf, mul_5, mul_6, mul_7
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_6), kwargs = {})
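# Exact (erf-based) GELU (illustration only):
#   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
# where 0.7071067811865476 = 1 / sqrt(2).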
triton_poi_fused_gelu_10 = async_compile.triton('triton_poi_fused_gelu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/pu/cpuql3oz4hmaygynopg7lq7xhfiv7hr7pr4vyzhfpmw34jymdp7q.py
# Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_1 => add_2
# x_3 => add_3
# x_9 => add_7
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_15), kwargs = {})
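# Eager-mode sketch (illustration only): the final fused residual add,
#   out = (x + attn_proj + proj_bias) + (mlp_fc2 + fc2_bias)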
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16, ), (1, ))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf3, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1; del buf1 # reuse
buf14 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf12, primals_6, buf13, buf14, 16, grid=grid(16), stream=stream0)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_9.run(primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, grid=grid(64), stream=stream0)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu]
triton_poi_fused_gelu_10.run(buf16, buf17, 256, grid=grid(256), stream=stream0)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf19, primals_3, buf12, primals_6, primals_12, 64, grid=grid(64), stream=stream0)
del primals_12
return (buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from functools import partial
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
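class DropPath(nn.Module):
    """Minimal stochastic-depth layer (a sketch). The original source references
    DropPath without importing or defining it -- it is commonly taken from
    timm.models.layers -- so this self-contained stand-in is assumed here; it
    is only instantiated when drop_path > 0.0, which the defaults below never
    trigger."""
    def __init__(self, drop_prob=0.0):
        super().__init__()
        self.drop_prob = drop_prob
    def forward(self, x):
        if self.drop_prob == 0.0 or not self.training:
            return x
        keep_prob = 1.0 - self.drop_prob
        # One Bernoulli mask per sample, broadcast over all remaining dims,
        # rescaled so the expected activation is unchanged.
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        mask = x.new_empty(shape).bernoulli_(keep_prob)
        return x * mask / keep_prob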
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn
.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), vis=False):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from functools import partial
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
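# First LayerNorm pass fused with the residual add: for each of the 16 rows
# the kernel forms x + attn_out + proj_bias on the fly, then writes the row
# mean (sum / 4.0) and the biased variance (mean of squared deviations over
# the normalized width of 4) to two side buffers.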
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
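# Second LayerNorm pass: recomputes the same residual value per element and
# normalizes it as (v - mean) * rsqrt(var + 1e-06) * weight + bias; the
# 1e-06 epsilon matches norm_layer=partial(nn.LayerNorm, eps=1e-06) in the
# Block definition below.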
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
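# Exact (erf-based) GELU, matching nn.GELU's default formulation:
#   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
# where 0.7071067811865476 is 1 / sqrt(2).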
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
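# Final fused residual: in_out_ptr0 holds the fc2 matmul result, and the
# kernel adds fc2's bias plus the attention-branch residual
# (x + attn_out + proj_bias) in place, producing the block output.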
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16,), (1,))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12,
primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12,
primals_6, buf13, buf14, primals_7, primals_8, buf15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0)
del buf7
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_10[grid(256)](buf16, buf17, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12,
primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
    return (buf19, primals_3, primals_6, primals_7,
        reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8,
        reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12,
        reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16,
        reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
        primals_11, primals_9, primals_5,
        reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4)
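# End to end, call() executes one pre-norm transformer block:
# LayerNorm -> fused qkv matmul -> per-head bmm attention with softmax ->
# output projection + residual LayerNorm -> Linear/GELU/Linear MLP -> final
# residual. The extra tensors in the return tuple are saved for backward.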
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class BlockNew(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0,
        act_layer=nn.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06),
        vis=False):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, input_0):
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_4 = self.attn.qkv.weight
primals_5 = self.attn.proj.weight
primals_6 = self.attn.proj.bias
primals_7 = self.norm2.weight
primals_8 = self.norm2.bias
primals_9 = self.mlp.fc1.weight
primals_10 = self.mlp.fc1.bias
primals_11 = self.mlp.fc2.weight
primals_12 = self.mlp.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
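# Minimal usage sketch (assumptions: a CUDA device is available, and DropPath
# and functools.partial are imported earlier in this module as in the source
# repo):
#   block = BlockNew(dim=4, num_heads=4).cuda()
#   x = torch.rand(4, 4, 4, device='cuda')  # (B, N, C)
#   out = block(x)  # same shape as the input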
| xuewengeophysics/Conformer | Block | false | 13,141 | ["Apache-2.0"] | 0 | e769a1ac9ab110dae2a356a4de1e06ccd0e95041 | https://github.com/xuewengeophysics/Conformer/tree/e769a1ac9ab110dae2a356a4de1e06ccd0e95041 |
Biaffine | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/bc/cbcett6ey62xkijoadrmiqwnmvdqa242vrdqwxiw4pvecwqjoged.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
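# The cat kernel builds the bias-augmented input: for column indices < 4 it
# copies the feature values, and for column 4 it writes the constant 1.0,
# i.e. torch.cat((x, torch.ones_like(x[..., :1])), -1) from the eager
# forward below.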
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 80, grid=grid(80), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(primals_2, buf2, 80, grid=grid(80), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1, 5), 0), out=buf3)
del buf1
return (reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 4), 0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0), )
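# With n_out=1 the einsum 'bxi,oij,byj->boxy' is lowered to two batched
# matmuls: first the augmented x (viewed as 1x16x5) is multiplied by the
# 5x5 weight, then each batch of the augmented y multiplies the transposed
# result, and the final (4, 4, 4) grid is reinterpreted as (4, 4, 4, 1) to
# match the permute(0, 2, 3, 1) in the eager forward.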
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 5, 5), (25, 5, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.autograd
import torch.nn as nn
class Biaffine(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(Biaffine, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
nn.init.xavier_normal_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.permute(0, 2, 3, 1)
return s
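# For reference, the biaffine score expands to
#   s[b, o, x, y] = sum_{i,j} x[b, x, i] * W[o, i, j] * y[b, y, j]
# and appending a constant 1 to x and y (bias_x / bias_y) lets the same W
# also carry the linear and constant terms of the bilinear form.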
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1),
0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1,
5), 0), out=buf3)
del buf1
    return (reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 4), 0),
        reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0),
        reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0))
class BiaffineNew(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(BiaffineNew, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
nn.init.xavier_normal_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| yifding/W2NER | Biaffine | false | 13,142 | ["MIT"] | 0 | d13128e45f3930a8b8faa794318939dc90a75974 | https://github.com/yifding/W2NER/tree/d13128e45f3930a8b8faa794318939dc90a75974 |
SmoothBCEwLogits | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/lk/clkucjdow3uaxprosshl3chftjoqkfj653zmoyf4qvo2pyac4ss6.py
# Topologically Sorted Source Nodes: [mul, targets, loss, loss_1], Original ATen: [aten.mul, aten.add, aten.binary_cross_entropy_with_logits, aten.mean]
# Source node to ATen node mapping:
# loss => abs_1, exp, full_default, log1p, mean, minimum, mul_1, neg, sub, sub_1, sub_2
# loss_1 => mean_1
# mul => mul
# targets => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mean,), kwargs = {})
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp5 = tmp1 - tmp4
tmp7 = tmp5 * tmp6
tmp8 = triton_helpers.minimum(tmp3, tmp6)
tmp9 = tl_math.abs(tmp6)
tmp10 = -tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = libdevice.log1p(tmp11)
tmp13 = tmp8 - tmp12
tmp14 = tmp7 - tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tmp20 = tmp19 / tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
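# The kernel implements the numerically stable form used by
# F.binary_cross_entropy_with_logits:
#   loss = (1 - t) * x - (min(0, x) - log1p(exp(-|x|)))
# summed over all 256 elements and divided by 256 (the 'mean' reduction);
# the trailing division by 1.0 is the second, now-scalar .mean() call.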
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, targets, loss, loss_1], Original ATen: [aten.mul, aten.add, aten.binary_cross_entropy_with_logits, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss
class SmoothBCEwLogits(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth(targets: 'torch.Tensor', n_labels: 'int', smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = targets * (1.0 - smoothing) + 0.5 * smoothing
return targets
def forward(self, inputs, targets):
        targets = SmoothBCEwLogits._smooth(targets, inputs.size(-1),
            self.smoothing)
loss = F.binary_cross_entropy_with_logits(inputs, targets, self.weight)
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'mean':
loss = loss.mean()
return loss
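# Label-smoothing arithmetic: _smooth maps a target t to t * (1 - s) + 0.5 * s,
# so e.g. s = 0.1 turns hard labels {0, 1} into {0.05, 0.95}; with the default
# s = 0.0 (the case compiled above) the targets pass through unchanged.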
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _WeightedLoss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp5 = tmp1 - tmp4
tmp7 = tmp5 * tmp6
tmp8 = triton_helpers.minimum(tmp3, tmp6)
tmp9 = tl_math.abs(tmp6)
tmp10 = -tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = libdevice.log1p(tmp11)
tmp13 = tmp8 - tmp12
tmp14 = tmp7 - tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tmp20 = tmp19 / tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
        triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0[grid(1)](
            buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SmoothBCEwLogitsNew(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth(targets: 'torch.Tensor', n_labels: 'int', smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = targets * (1.0 - smoothing) + 0.5 * smoothing
return targets
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| yota-p/kaggle_titanic | SmoothBCEwLogits | false | 13,143 | ["MIT"] | 0 | 36d2c53711482195f519d9280abadf0d6afa9a15 | https://github.com/yota-p/kaggle_titanic/tree/36d2c53711482195f519d9280abadf0d6afa9a15 |
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ud/cud5diybwrcgcrwz72q7ptq4ubvr3sly4tc53m6uwovofwm4pt6u.py
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.sub]
# Source node to ATen node mapping:
# outputs => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %unsqueeze), kwargs = {})
triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/bg/cbgcye5badtvwqoviuubf4kx36eubwcypcerx76dcgmrx4tnxt2e.py
# Topologically Sorted Source Nodes: [add, std, outputs_1, outputs_2, outputs_3], Original ATen: [aten.add, aten.pow, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# outputs_1 => div
# outputs_2 => mul
# outputs_3 => add_1
# std => pow_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_1, 1e-12), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %pow_2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {})
triton_poi_fused_add_div_mul_pow_1 = async_compile.triton('triton_poi_fused_add_div_mul_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-12
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
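# The two kernels split this custom LayerNorm into (1) centering, x - mean(x),
# and (2) a fused pass that recomputes the variance as the mean of the squared
# centered values and then applies
#   out = centered / sqrt(var + 1e-12) * gamma + beta
# matching the module's epsilon default of 1e-12 below.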
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_sub_0.run(primals_3, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, std, outputs_1, outputs_2, outputs_3], Original ATen: [aten.add, aten.pow, aten.div, aten.mul]
triton_poi_fused_add_div_mul_pow_1.run(buf0, primals_2, primals_1, buf1, 256, grid=grid(256), stream=stream0)
del buf0
del primals_1
del primals_2
return (buf1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.autograd
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, input_dim, cond_dim=0, center=True, scale=True,
epsilon=None, conditional=False, hidden_units=None,
        hidden_activation='linear', hidden_initializer='xavier', **kwargs):
super(LayerNorm, self).__init__()
"""
input_dim: inputs.shape[-1]
cond_dim: cond.shape[-1]
"""
self.center = center
self.scale = scale
self.conditional = conditional
self.hidden_units = hidden_units
self.hidden_initializer = hidden_initializer
self.epsilon = epsilon or 1e-12
self.input_dim = input_dim
self.cond_dim = cond_dim
if self.center:
self.beta = nn.Parameter(torch.zeros(input_dim))
if self.scale:
self.gamma = nn.Parameter(torch.ones(input_dim))
if self.conditional:
if self.hidden_units is not None:
self.hidden_dense = nn.Linear(in_features=self.cond_dim,
out_features=self.hidden_units, bias=False)
if self.center:
self.beta_dense = nn.Linear(in_features=self.cond_dim,
out_features=input_dim, bias=False)
if self.scale:
self.gamma_dense = nn.Linear(in_features=self.cond_dim,
out_features=input_dim, bias=False)
self.initialize_weights()
def initialize_weights(self):
if self.conditional:
if self.hidden_units is not None:
if self.hidden_initializer == 'normal':
                    torch.nn.init.normal_(self.hidden_dense.weight)
elif self.hidden_initializer == 'xavier':
torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
if self.center:
torch.nn.init.constant_(self.beta_dense.weight, 0)
if self.scale:
torch.nn.init.constant_(self.gamma_dense.weight, 0)
def forward(self, inputs, cond=None):
if self.conditional:
if self.hidden_units is not None:
cond = self.hidden_dense(cond)
for _ in range(len(inputs.shape) - len(cond.shape)):
cond = cond.unsqueeze(1)
if self.center:
beta = self.beta_dense(cond) + self.beta
if self.scale:
gamma = self.gamma_dense(cond) + self.gamma
else:
if self.center:
beta = self.beta
if self.scale:
gamma = self.gamma
outputs = inputs
if self.center:
mean = torch.mean(outputs, dim=-1).unsqueeze(-1)
outputs = outputs - mean
if self.scale:
variance = torch.mean(outputs ** 2, dim=-1).unsqueeze(-1)
std = (variance + self.epsilon) ** 0.5
outputs = outputs / std
outputs = outputs * gamma
if self.center:
outputs = outputs + beta
return outputs
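# Worked form of the unconditional path compiled above:
#   mu = x.mean(-1, keepdim=True)
#   xc = x - mu
#   out = xc / (xc.pow(2).mean(-1, keepdim=True) + eps).sqrt() * gamma + beta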
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-12
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(256)](primals_3, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_1[grid(256)](buf0, primals_2,
primals_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_1
del primals_2
return buf1, primals_3
class LayerNormNew(nn.Module):
def __init__(self, input_dim, cond_dim=0, center=True, scale=True,
epsilon=None, conditional=False, hidden_units=None,
        hidden_activation='linear', hidden_initializer='xavier', **kwargs):
super(LayerNormNew, self).__init__()
"""
input_dim: inputs.shape[-1]
cond_dim: cond.shape[-1]
"""
self.center = center
self.scale = scale
self.conditional = conditional
self.hidden_units = hidden_units
self.hidden_initializer = hidden_initializer
self.epsilon = epsilon or 1e-12
self.input_dim = input_dim
self.cond_dim = cond_dim
if self.center:
self.beta = nn.Parameter(torch.zeros(input_dim))
if self.scale:
self.gamma = nn.Parameter(torch.ones(input_dim))
if self.conditional:
if self.hidden_units is not None:
self.hidden_dense = nn.Linear(in_features=self.cond_dim,
out_features=self.hidden_units, bias=False)
if self.center:
self.beta_dense = nn.Linear(in_features=self.cond_dim,
out_features=input_dim, bias=False)
if self.scale:
self.gamma_dense = nn.Linear(in_features=self.cond_dim,
out_features=input_dim, bias=False)
self.initialize_weights()
def initialize_weights(self):
if self.conditional:
if self.hidden_units is not None:
if self.hidden_initializer == 'normal':
                    torch.nn.init.normal_(self.hidden_dense.weight)
elif self.hidden_initializer == 'xavier':
torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
if self.center:
torch.nn.init.constant_(self.beta_dense.weight, 0)
if self.scale:
torch.nn.init.constant_(self.gamma_dense.weight, 0)
def forward(self, input_0):
primals_1 = self.beta
primals_2 = self.gamma
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| yifding/W2NER | LayerNorm | false | 13,144 | ["MIT"] | 0 | d13128e45f3930a8b8faa794318939dc90a75974 | https://github.com/yifding/W2NER/tree/d13128e45f3930a8b8faa794318939dc90a75974 |
CriticDownAction | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/pi/cpigaeuevuwuziljrv6gibhffc76hnzwid25r3axhvnzlkp4wl7f.py
# Topologically Sorted Source Nodes: [xum], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# xum => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
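# Three-way concatenation along the last axis: column indices 0-3 read from
# in_ptr0, 4-7 from in_ptr1, and 8-11 from in_ptr2, producing the width-12
# xum = torch.cat([x, u, m], -1) input named in the graph fragment above.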
# kernel path: runs/run_shard_9/inductor_cache/m6/cm6ozsdmt5vl54fxwk7cgktzswysgn2c37vsaybpucplzehkrnnz.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_5 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
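# Fused bias add + ReLU for the first linear layer; the kernel also emits the
# boolean (relu <= 0) mask consumed by the autograd threshold_backward op.
# The mask is stored with a padded row stride (1664 instead of a dense 1600),
# most likely alignment padding from the memory planner.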
# kernel path: runs/run_shard_9/inductor_cache/64/c64g5uxk2a5hbzuhd6oikla2gb5eyfjjb6kbh7btzswha52gl5ex.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = (xindex // 1200)
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4h/c4h6r6vefoeuinm5eqv2d6wqmfj2mnjacalp633y3m6bnseb2bnk.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.view]
# Source node to ATen node mapping:
# x_1 => relu_1
# x_2 => view_4
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {})
triton_poi_fused_relu_view_3 = async_compile.triton('triton_poi_fused_relu_view_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = (xindex // 300)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/r7/cr7t2pqydwjqvhltmfwhzh7g6thvck2skrsaimf3kvqrn2podpxh.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%tanh, %tanh_1], -1), kwargs = {})
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tmp10 = tl.full([1], 8, tl.int64)
tmp11 = tmp0 < tmp10
tmp12 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = libdevice.tanh(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/tc/ctcyhc3j7gcndovmump6plsamm3m4336gv7k75wru64k3klwdqb5.py
# Topologically Sorted Source Nodes: [msg_down], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# msg_down => div, pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_17, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_17, %expand), kwargs = {})
triton_per_fused_div_linalg_vector_norm_5 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (400, 12), (12, 1))
assert_size_stride(primals_5, (400, ), (1, ))
assert_size_stride(primals_6, (300, 400), (400, 1))
assert_size_stride(primals_7, (300, ), (1, ))
assert_size_stride(primals_8, (1, 300), (300, 1))
assert_size_stride(primals_9, (1, ), (1, ))
assert_size_stride(primals_10, (400, 12), (12, 1))
assert_size_stride(primals_11, (400, ), (1, ))
assert_size_stride(primals_12, (300, 400), (400, 1))
assert_size_stride(primals_13, (300, ), (1, ))
assert_size_stride(primals_14, (1, 300), (300, 1))
assert_size_stride(primals_15, (1, ), (1, ))
assert_size_stride(primals_16, (400, 8), (8, 1))
assert_size_stride(primals_17, (400, ), (1, ))
assert_size_stride(primals_18, (300, 400), (400, 1))
assert_size_stride(primals_19, (300, ), (1, ))
assert_size_stride(primals_20, (16, 300), (300, 1))
assert_size_stride(primals_21, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [xum], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 768, grid=grid(768), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 12), (12, 1), 0), reinterpret_tensor(primals_4, (12, 400), (1, 12), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf1 # reuse
buf30 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf30, 25600, grid=grid(25600), stream=stream0)
del primals_5
buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 300), (1, 400), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32)
buf29 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf3, primals_7, buf4, buf29, 19200, grid=grid(19200), stream=stream0)
del primals_7
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_3.run(buf4, buf5, 19200, grid=grid(19200), stream=stream0)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf7)
del primals_9
buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 12), (12, 1), 0), reinterpret_tensor(primals_10, (12, 400), (1, 12), 0), out=buf8)
del primals_10
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf8 # reuse
buf28 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf9, primals_11, buf28, 25600, grid=grid(25600), stream=stream0)
del primals_11
buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0), reinterpret_tensor(primals_12, (400, 300), (1, 400), 0), out=buf10)
buf11 = buf4; del buf4 # reuse
buf27 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf10, primals_13, buf11, buf27, 19200, grid=grid(19200), stream=stream0)
del primals_13
buf12 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_3.run(buf11, buf12, 19200, grid=grid(19200), stream=stream0)
buf14 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, buf12, reinterpret_tensor(primals_14, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf14)
del primals_15
buf15 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(primals_1, primals_3, buf15, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_3
buf16 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf15, (64, 8), (8, 1), 0), reinterpret_tensor(primals_16, (8, 400), (1, 8), 0), out=buf16)
del primals_16
buf17 = reinterpret_tensor(buf16, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf16 # reuse
buf26 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf17, primals_17, buf26, 25600, grid=grid(25600), stream=stream0)
del primals_17
buf18 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (64, 400), (400, 1), 0), reinterpret_tensor(primals_18, (400, 300), (1, 400), 0), out=buf18)
buf19 = buf11; del buf11 # reuse
buf25 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf18, primals_19, buf19, buf25, 19200, grid=grid(19200), stream=stream0)
del primals_19
buf20 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_3.run(buf19, buf20, 19200, grid=grid(19200), stream=stream0)
del buf19
buf21 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_21, buf20, reinterpret_tensor(primals_20, (300, 16), (1, 300), 0), alpha=1, beta=1, out=buf21)
del primals_21
buf22 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf23 = reinterpret_tensor(buf22, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf22 # reuse
buf24 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [msg_down], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_per_fused_div_linalg_vector_norm_5.run(buf23, buf21, buf24, 64, 16, grid=grid(64), stream=stream0)
return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf24, reinterpret_tensor(buf0, (64, 12), (12, 1), 0), reinterpret_tensor(buf2, (64, 400), (400, 1), 0), buf5, reinterpret_tensor(buf9, (64, 400), (400, 1), 0), buf12, reinterpret_tensor(buf15, (64, 8), (8, 1), 0), reinterpret_tensor(buf17, (64, 400), (400, 1), 0), buf20, buf21, buf23, primals_20, buf25, primals_18, buf26, primals_14, buf27, primals_12, buf28, primals_8, buf29, primals_6, buf30, )
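# Note (added commentary, not generated by Inductor): the first three returned
# tensors are the forward outputs (q1, q2 as (4, 4, 4, 1) views and msg_down as
# (4, 4, 4, 16)); the remaining buffers and primals appear to be activations,
# ReLU masks, and weights saved for the backward pass.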
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((400, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((400, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((16, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class CriticDownAction(nn.Module):
"""a top-down module used in bothway message passing that passes messages to children and outputs q-values"""
def __init__(self, self_input_dim, action_dim, msg_dim, max_children):
super(CriticDownAction, self).__init__()
self.baseQ1 = MLPBase(self_input_dim + action_dim + msg_dim, 1)
self.baseQ2 = MLPBase(self_input_dim + action_dim + msg_dim, 1)
self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim *
max_children)
def forward(self, x, u, m):
xum = torch.cat([x, u, m], dim=-1)
x1 = self.baseQ1(xum)
x2 = self.baseQ2(xum)
xm = torch.cat([x, m], dim=-1)
xm = torch.tanh(xm)
msg_down = self.msg_base(xm)
msg_down = F.normalize(msg_down, dim=-1)
return x1, x2, msg_down
def Q1(self, x, u, m):
xum = torch.cat([x, u, m], dim=-1)
x1 = self.baseQ1(xum)
xm = torch.cat([x, m], dim=-1)
xm = torch.tanh(xm)
msg_down = self.msg_base(xm)
msg_down = F.normalize(msg_down, dim=-1)
return x1, msg_down
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'self_input_dim': 4, 'action_dim': 4, 'msg_dim': 4,
'max_children': 4}]
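# Minimal usage sketch (illustrative only, not part of the original source;
# shapes follow get_inputs()/get_init_inputs() above):
#
#   critic = CriticDownAction(self_input_dim=4, action_dim=4, msg_dim=4,
#       max_children=4)
#   x, u, m = get_inputs()
#   q1, q2, msg_down = critic(x, u, m)
#   # q1, q2: (4, 4, 4, 1); msg_down: (4, 4, 4, 16) with unit L2 norm on dim -1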
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
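# Fused torch.cat([x, u, m], dim=-1): builds the (4, 4, 4, 12) xum tensor by
# selecting from one of the three (4, 4, 4, 4) inputs based on the last index.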
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
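# Fused bias-add + ReLU for the 400-unit layers, applied in place on the
# matmul output; also writes the (activation <= 0) mask consumed by
# threshold_backward in the backward pass.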
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
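# Fused bias-add + ReLU for the 300-unit layers; writes the activation and the
# (activation <= 0) mask into separately padded (stride 1216 / 1280) buffers.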
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
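# Gathers the padded ReLU activations into a contiguous (64, 300) buffer so
# the following addmm can consume them as a plain 2D matrix.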
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
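# Fused torch.tanh(torch.cat([x, m], dim=-1)): concatenates the two
# (4, 4, 4, 4) inputs along the last dim and applies tanh in the same pass.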
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp12 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = libdevice.tanh(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
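# Fused L2 normalization mirroring F.normalize(msg_down, dim=-1): sums squares
# over the last dim (16 elements), clamps the norm at 1e-12, and writes both
# the per-row norms and the normalized tensor.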
@triton.jit
def triton_per_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask)
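# call() executes the compiled forward graph; the three module inputs and all
# 18 weight/bias tensors are passed as one flat 21-element list (see
# CriticDownActionNew.forward below for the packing order).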
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (400, 12), (12, 1))
assert_size_stride(primals_5, (400,), (1,))
assert_size_stride(primals_6, (300, 400), (400, 1))
assert_size_stride(primals_7, (300,), (1,))
assert_size_stride(primals_8, (1, 300), (300, 1))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (400, 12), (12, 1))
assert_size_stride(primals_11, (400,), (1,))
assert_size_stride(primals_12, (300, 400), (400, 1))
assert_size_stride(primals_13, (300,), (1,))
assert_size_stride(primals_14, (1, 300), (300, 1))
assert_size_stride(primals_15, (1,), (1,))
assert_size_stride(primals_16, (400, 8), (8, 1))
assert_size_stride(primals_17, (400,), (1,))
assert_size_stride(primals_18, (300, 400), (400, 1))
assert_size_stride(primals_19, (300,), (1,))
assert_size_stride(primals_20, (16, 300), (300, 1))
assert_size_stride(primals_21, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(768)](primals_1, primals_2, primals_3,
buf0, 768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 12), (12, 1), 0),
reinterpret_tensor(primals_4, (12, 400), (1, 12), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf1
buf30 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf2,
primals_5, buf30, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 300), (1, 400), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf29 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf3,
primals_7, buf4, buf29, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_7
buf5 = buf3
del buf3
        triton_poi_fused_relu_view_3[grid(19200)](buf4, buf5, 19200,
            XBLOCK=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
(300, 1), (1, 300), 0), alpha=1, beta=1, out=buf7)
del primals_9
buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 12), (12, 1), 0),
reinterpret_tensor(primals_10, (12, 400), (1, 12), 0), out=buf8)
del primals_10
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf8
buf28 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf9,
primals_11, buf28, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_12, (400, 300), (1, 400), 0), out=buf10)
buf11 = buf4
del buf4
buf27 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf10,
primals_13, buf11, buf27, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_13
buf12 = buf10
del buf10
triton_poi_fused_relu_view_3[grid(19200)](buf11, buf12, 19200,
XBLOCK=256, num_warps=4, num_stages=1)
buf14 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_15, buf12, reinterpret_tensor(
primals_14, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf14)
del primals_15
buf15 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32
)
triton_poi_fused_cat_4[grid(512)](primals_1, primals_3, buf15, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_3
buf16 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf15, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_16, (8, 400), (1, 8), 0), out=buf16)
del primals_16
buf17 = reinterpret_tensor(buf16, (4, 4, 4, 400), (6400, 1600, 400,
1), 0)
del buf16
buf26 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf17,
primals_17, buf26, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf18 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_18, (400, 300), (1, 400), 0), out=buf18)
buf19 = buf11
del buf11
buf25 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf18,
primals_19, buf19, buf25, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_19
buf20 = buf18
del buf18
triton_poi_fused_relu_view_3[grid(19200)](buf19, buf20, 19200,
XBLOCK=256, num_warps=4, num_stages=1)
del buf19
buf21 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_21, buf20, reinterpret_tensor(
primals_20, (300, 16), (1, 300), 0), alpha=1, beta=1, out=buf21)
del primals_21
buf22 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf23 = reinterpret_tensor(buf22, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf22
        buf24 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1),
            torch.float32)
triton_per_fused_div_linalg_vector_norm_5[grid(64)](buf23, buf21,
buf24, 64, 16, XBLOCK=32, num_warps=4, num_stages=1)
return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf24,
reinterpret_tensor(buf0, (64, 12), (12, 1), 0), reinterpret_tensor(
buf2, (64, 400), (400, 1), 0), buf5, reinterpret_tensor(buf9, (64,
400), (400, 1), 0), buf12, reinterpret_tensor(buf15, (64, 8), (8, 1
), 0), reinterpret_tensor(buf17, (64, 400), (400, 1), 0), buf20,
buf21, buf23, primals_20, buf25, primals_18, buf26, primals_14,
buf27, primals_12, buf28, primals_8, buf29, primals_6, buf30)
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class CriticDownActionNew(nn.Module):
"""a top-down module used in bothway message passing that passes messages to children and outputs q-values"""
def __init__(self, self_input_dim, action_dim, msg_dim, max_children):
super(CriticDownActionNew, self).__init__()
self.baseQ1 = MLPBase(self_input_dim + action_dim + msg_dim, 1)
self.baseQ2 = MLPBase(self_input_dim + action_dim + msg_dim, 1)
self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim *
max_children)
def Q1(self, x, u, m):
xum = torch.cat([x, u, m], dim=-1)
x1 = self.baseQ1(xum)
xm = torch.cat([x, m], dim=-1)
xm = torch.tanh(xm)
msg_down = self.msg_base(xm)
msg_down = F.normalize(msg_down, dim=-1)
return x1, msg_down
def forward(self, input_0, input_1, input_2):
primals_4 = self.baseQ1.l1.weight
primals_5 = self.baseQ1.l1.bias
primals_6 = self.baseQ1.l2.weight
primals_7 = self.baseQ1.l2.bias
primals_8 = self.baseQ1.l3.weight
primals_9 = self.baseQ1.l3.bias
primals_10 = self.baseQ2.l1.weight
primals_11 = self.baseQ2.l1.bias
primals_12 = self.baseQ2.l2.weight
primals_13 = self.baseQ2.l2.bias
primals_14 = self.baseQ2.l3.weight
primals_15 = self.baseQ2.l3.bias
primals_16 = self.msg_base.l1.weight
primals_17 = self.msg_base.l1.bias
primals_18 = self.msg_base.l2.weight
primals_19 = self.msg_base.l2.bias
primals_20 = self.msg_base.l3.weight
primals_21 = self.msg_base.l3.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0], output[1], output[2]
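# Equivalence sketch (assumptions: a CUDA device is available and the eager
# CriticDownAction from the reference implementation above is in scope):
#
#   ref = CriticDownAction(4, 4, 4, 4).cuda()
#   new = CriticDownActionNew(4, 4, 4, 4).cuda()
#   new.load_state_dict(ref.state_dict())
#   x, u, m = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
#   for a, b in zip(ref(x, u, m), new(x, u, m)):
#       assert torch.allclose(a, b, atol=1e-5)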
| yangfanthu/modular-rl | CriticDownAction | false | 13,145 | ["BSD-2-Clause"] | 0 | 25c599bab641a7e732dbaf116cd240fa2358f113 | https://github.com/yangfanthu/modular-rl/tree/25c599bab641a7e732dbaf116cd240fa2358f113 |
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/p7/cp7xzeyl6japtnkojqx5iupjksot3nuocbambsy2o3yflsevkl5j.py
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_2 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/s2/cs2rk3o3kmhydx4oijp6rsdb5atcrq5axy4adadrpl7gkt7scies.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/jq/cjqs74o7upz4nudmv2dbjbmggtmuj2mktvc7gygosrcj2d2xxghd.py
# Topologically Sorted Source Nodes: [result_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# result_3 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %view_13], -1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x1 + (16*((-4) + x0))), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/t2/ct2fj4cxakdevxwd5upea4iyfznuislybj5p4wd6jgtx5ayzurnk.py
# Topologically Sorted Source Nodes: [result_6, result_7], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# result_6 => add
# result_7 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_15, %primals_2), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/6g/c6gddvf7hivp3xdh4pyazhygzjkdnh5sxyn6itmcverzcfqnfwwt.py
# Topologically Sorted Source Nodes: [result_6, result_7], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# result_6 => add
# result_7 => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_15, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_8), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf2, buf3, 4, 16, grid=grid(4, 16), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf0, buf4, 4, 16, grid=grid(4, 16), stream=stream0)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, buf8, 4, 16, grid=grid(4, 16), stream=stream0)
buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [result_3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(primals_2, buf9, buf10, 128, grid=grid(128), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [result_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf11)
del primals_7
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [result_6, result_7], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(buf11, primals_2, buf12, buf13, 16, grid=grid(16), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [result_6, result_7], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(buf11, primals_2, buf12, buf13, primals_8, primals_9, buf14, 64, grid=grid(64), stream=stream0)
del buf12
del buf13
del primals_9
return (buf14, buf7, primals_2, primals_8, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), buf11, primals_6, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0), )
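# call() returns the layer output (buf14, the post-LayerNorm result) followed
# by the attention weights (buf7) and the tensors Inductor saves for the
# backward pass (inputs, layer-norm weight, and reinterpreted intermediates).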
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch as t
class Linear(nn.Module):
"""
Linear Module
"""
def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
"""
:param in_dim: dimension of input
:param out_dim: dimension of output
:param bias: boolean. if True, bias is included.
        :param w_init: str. gain name passed to Xavier initialization of the weights.
"""
super(Linear, self).__init__()
self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
return self.linear_layer(x)
class MultiheadAttention(nn.Module):
"""
Multihead attention mechanism (dot attention)
"""
def __init__(self, num_hidden_k):
"""
:param num_hidden_k: dimension of hidden
"""
super(MultiheadAttention, self).__init__()
self.num_hidden_k = num_hidden_k
self.attn_dropout = nn.Dropout(p=0.1)
def forward(self, key, value, query, mask=None, query_mask=None):
attn = t.bmm(query, key.transpose(1, 2))
attn = attn / math.sqrt(self.num_hidden_k)
if mask is not None:
attn = attn.masked_fill(mask, -2 ** 32 + 1)
attn = t.softmax(attn, dim=-1)
else:
attn = t.softmax(attn, dim=-1)
if query_mask is not None:
attn = attn * query_mask
attn = self.attn_dropout(attn)
result = t.bmm(attn, value)
return result, attn
class Attention(nn.Module):
"""
Attention Network
"""
def __init__(self, num_hidden, h=4):
"""
:param num_hidden: dimension of hidden
:param h: num of heads
"""
super(Attention, self).__init__()
self.num_hidden = num_hidden
self.num_hidden_per_attn = num_hidden // h
self.h = h
self.key = Linear(num_hidden, num_hidden, bias=False)
self.value = Linear(num_hidden, num_hidden, bias=False)
self.query = Linear(num_hidden, num_hidden, bias=False)
self.multihead = MultiheadAttention(self.num_hidden_per_attn)
self.residual_dropout = nn.Dropout(p=0.1)
self.final_linear = Linear(num_hidden * 2, num_hidden)
self.layer_norm_1 = nn.LayerNorm(num_hidden)
def forward(self, memory, decoder_input, mask=None, query_mask=None):
batch_size = memory.size(0)
seq_k = memory.size(1)
seq_q = decoder_input.size(1)
if query_mask is not None:
query_mask = query_mask.unsqueeze(-1).repeat(1, 1, seq_k)
query_mask = query_mask.repeat(self.h, 1, 1)
if mask is not None:
mask = mask.repeat(self.h, 1, 1)
key = self.key(memory).view(batch_size, seq_k, self.h, self.
num_hidden_per_attn)
value = self.value(memory).view(batch_size, seq_k, self.h, self.
num_hidden_per_attn)
query = self.query(decoder_input).view(batch_size, seq_q, self.h,
self.num_hidden_per_attn)
key = key.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self.
num_hidden_per_attn)
value = value.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self
.num_hidden_per_attn)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, seq_q, self
.num_hidden_per_attn)
result, attns = self.multihead(key, value, query, mask=mask,
query_mask=query_mask)
result = result.view(self.h, batch_size, seq_q, self.
num_hidden_per_attn)
result = result.permute(1, 2, 0, 3).contiguous().view(batch_size,
seq_q, -1)
result = t.cat([decoder_input, result], dim=-1)
result = self.final_linear(result)
result = self.residual_dropout(result)
result = result + decoder_input
result = self.layer_norm_1(result)
return result, attns
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_hidden': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch as t
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
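# Kernels 1 and 2 together form a numerically stable softmax over the last
# dimension (rows of length 4). The 1.0 multiplier appears to be the attention
# scale 1/sqrt(num_hidden_k) folded in as a constant (num_hidden_k == 1 here).
# A sketch of the same computation in eager PyTorch (illustrative only):
#
#   m = scores.amax(dim=-1, keepdim=True)
#   e = torch.exp(scores - m)
#   attn = e / e.sum(dim=-1, keepdim=True)   # == torch.softmax(scores, dim=-1)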
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x1 + 16 * (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
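# The cat kernel writes rows of length 8: lanes 0-3 come from decoder_input
# (in_ptr0) and lanes 4-7 from the attention output (in_ptr1). The
# x1 + 16 * (-4 + x0) addressing folds the head dimension back in, fusing the
# permute/view with torch.cat([decoder_input, result], dim=-1) from the model.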
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
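# LayerNorm statistics for result + decoder_input, computed directly over the
# four lanes of each row: out_ptr0 receives the mean (sum / 4.0) and out_ptr1
# the biased variance (mean of squared deviations), matching
# torch.var(..., unbiased=False) over the last dimension.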
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
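# Epilogue of the fused add + LayerNorm: normalize with the precomputed
# statistics and apply the affine parameters,
#   y = (h - mean) * rsqrt(var + 1e-05) * gamma + beta,
# where gamma/beta are layer_norm_1.weight/.bias (primals_8/primals_9).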
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(4, 16)](buf2, buf3, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
triton_poi_fused_clone_0[grid(4, 16)](buf0, buf4, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(4, 16)](buf1, buf8, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0)
del buf1
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1,
0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_3[grid(128)](primals_2, buf9, buf10, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 8),
(8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf11)
del primals_7
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(16)](buf11, primals_2,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(64)](buf11, primals_2,
buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_9
return buf14, buf7, primals_2, primals_8, reinterpret_tensor(primals_1,
(16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0
), buf11, primals_6, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0)
class Linear(nn.Module):
"""
Linear Module
"""
def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
"""
:param in_dim: dimension of input
:param out_dim: dimension of output
:param bias: boolean. if True, bias is included.
        :param w_init: str. gain name passed to Xavier initialization of the weights.
"""
super(Linear, self).__init__()
self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
return self.linear_layer(x)
class MultiheadAttention(nn.Module):
"""
Multihead attention mechanism (dot attention)
"""
def __init__(self, num_hidden_k):
"""
:param num_hidden_k: dimension of hidden
"""
super(MultiheadAttention, self).__init__()
self.num_hidden_k = num_hidden_k
self.attn_dropout = nn.Dropout(p=0.1)
def forward(self, key, value, query, mask=None, query_mask=None):
attn = t.bmm(query, key.transpose(1, 2))
attn = attn / math.sqrt(self.num_hidden_k)
if mask is not None:
attn = attn.masked_fill(mask, -2 ** 32 + 1)
attn = t.softmax(attn, dim=-1)
else:
attn = t.softmax(attn, dim=-1)
if query_mask is not None:
attn = attn * query_mask
attn = self.attn_dropout(attn)
result = t.bmm(attn, value)
return result, attn
class AttentionNew(nn.Module):
"""
Attention Network
"""
def __init__(self, num_hidden, h=4):
"""
:param num_hidden: dimension of hidden
:param h: num of heads
"""
super(AttentionNew, self).__init__()
self.num_hidden = num_hidden
self.num_hidden_per_attn = num_hidden // h
self.h = h
self.key = Linear(num_hidden, num_hidden, bias=False)
self.value = Linear(num_hidden, num_hidden, bias=False)
self.query = Linear(num_hidden, num_hidden, bias=False)
self.multihead = MultiheadAttention(self.num_hidden_per_attn)
self.residual_dropout = nn.Dropout(p=0.1)
self.final_linear = Linear(num_hidden * 2, num_hidden)
self.layer_norm_1 = nn.LayerNorm(num_hidden)
def forward(self, input_0, input_1):
primals_3 = self.key.linear_layer.weight
primals_4 = self.value.linear_layer.weight
primals_5 = self.query.linear_layer.weight
primals_6 = self.final_linear.linear_layer.weight
primals_7 = self.final_linear.linear_layer.bias
primals_8 = self.layer_norm_1.weight
primals_9 = self.layer_norm_1.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
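# Minimal usage sketch (a sketch only; assumes a CUDA device, matching
# get_inputs()/get_init_inputs() of the source model):
#
#   model = AttentionNew(num_hidden=4).cuda()
#   memory = torch.rand(4, 4, 4, device='cuda')
#   decoder_input = torch.rand(4, 4, 4, device='cuda')
#   out, attn = model(memory, decoder_input)  # out: (4, 4, 4), attn: (16, 4, 4)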
| yhgon/Transformer-TTS | Attention | false | 13,146 | ["MIT"] | 0 | 5f34945cb5500d484275700c4e393ed125d5e753 | https://github.com/yhgon/Transformer-TTS/tree/5f34945cb5500d484275700c4e393ed125d5e753 |
SelfAttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/er/cer6wyrsysb27mf4qvndrgb67as5vdr6kshph65tjwxcgctyw35g.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%mm,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/vm/cvmz5zb5hklcqpb7jp3aicaos5mk3fnzhheoduici43zwr4y2zyd.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(buf1, primals_3, out=buf2)
buf5 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf2, buf5, 1, 4, grid=grid(1), stream=stream0)
buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf5, (1, 4), (0, 1), 0), primals_1, out=buf6)
del buf5
return (reinterpret_tensor(buf6, (4, ), (1, ), 0), buf1, buf2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class SelfAttentionLayer(nn.Module):
def __init__(self, dim, da, alpha=0.2, dropout=0.5):
super(SelfAttentionLayer, self).__init__()
self.dim = dim
self.da = da
self.alpha = alpha
self.dropout = dropout
self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)))
self.b = nn.Parameter(torch.zeros(size=(self.da, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
nn.init.xavier_uniform_(self.b.data, gain=1.414)
def forward(self, h):
        h.shape[0]  # no-op; the batch size is computed but unused
e = torch.matmul(torch.tanh(torch.matmul(h, self.a)), self.b).squeeze(
dim=1)
        attention = F.softmax(e, dim=0)  # e is 1-d here; explicit dim avoids the deprecation warning
return torch.matmul(attention, h)
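def _example_forward():
    # Usage sketch matching get_inputs()/get_init_inputs() below; the function
    # name is illustrative and not part of the original module.
    layer = SelfAttentionLayer(dim=4, da=4)
    h = torch.rand(4, 4)
    return layer(h)  # attention-weighted sum of the rows of h, shape (4,)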
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'da': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
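# A persistent-reduction softmax: a single program loads all four scores,
# reduces with max2 and sum across the row, and normalizes in one pass --
# the stable softmax over the length-4 vector produced by the second matmul.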
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused__softmax_1[grid(1)](buf2, buf5, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (1, 4), (0, 1), 0),
primals_1, out=buf6)
del buf5
return reinterpret_tensor(buf6, (4,), (1,), 0
), buf1, buf2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0)
class SelfAttentionLayerNew(nn.Module):
def __init__(self, dim, da, alpha=0.2, dropout=0.5):
super(SelfAttentionLayerNew, self).__init__()
self.dim = dim
self.da = da
self.alpha = alpha
self.dropout = dropout
self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)))
self.b = nn.Parameter(torch.zeros(size=(self.da, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
nn.init.xavier_uniform_(self.b.data, gain=1.414)
def forward(self, input_0):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| yuka1369/KBRD | SelfAttentionLayer | false | 13,147 | ["MIT"] | 0 | fc0f723c448299f00eef6daabff675640a930c26 | https://github.com/yuka1369/KBRD/tree/fc0f723c448299f00eef6daabff675640a930c26 |
CRF | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/2b/c2bkqvv3w46os3y2jfp7zebz4v4e3jusxayajeubp3hevuxzgzn6.py
# Topologically Sorted Source Nodes: [add_1, max_1], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_1 => add_1
# max_1 => max_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_2, %unsqueeze_1), kwargs = {})
# %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_1, 1), kwargs = {})
triton_poi_fused_add_max_0 = async_compile.triton('triton_poi_fused_add_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (16*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (16*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (16*x1)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = triton_helpers.maximum(tmp5, tmp11)
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp12, tmp18)
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = triton_helpers.maximum(tmp19, tmp25)
tmp27 = tmp5 > tmp11
tmp28 = tmp5 == tmp11
tmp29 = tmp5 != tmp5
tmp30 = tmp11 != tmp11
tmp31 = tmp29 > tmp30
tmp32 = tmp27 | tmp31
tmp33 = tmp29 & tmp30
tmp34 = tmp28 | tmp33
tmp35 = tl.full([1], 0, tl.int64)
tmp36 = tl.full([1], 1, tl.int64)
tmp37 = tmp35 < tmp36
tmp38 = tmp34 & tmp37
tmp39 = tmp32 | tmp38
tmp40 = tl.where(tmp39, tmp5, tmp11)
tmp41 = tl.where(tmp39, tmp35, tmp36)
tmp42 = tmp40 > tmp18
tmp43 = tmp40 == tmp18
tmp44 = tmp40 != tmp40
tmp45 = tmp18 != tmp18
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp41 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp40, tmp18)
tmp55 = tl.where(tmp53, tmp41, tmp50)
tmp56 = tmp54 > tmp25
tmp57 = tmp54 == tmp25
tmp58 = tmp54 != tmp54
tmp59 = tmp25 != tmp25
tmp60 = tmp58 > tmp59
tmp61 = tmp56 | tmp60
tmp62 = tmp58 & tmp59
tmp63 = tmp57 | tmp62
tmp64 = tl.full([1], 3, tl.int64)
tmp65 = tmp55 < tmp64
tmp66 = tmp63 & tmp65
tmp67 = tmp61 | tmp66
tmp68 = tl.where(tmp67, tmp54, tmp25)
tmp69 = tl.where(tmp67, tmp55, tmp64)
tl.store(out_ptr0 + (x2), tmp26, xmask)
tl.store(out_ptr1 + (x2), tmp69, xmask)
''', device_str='cuda')
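# Inductor's NaN-aware max-with-index: the comparison chain tracks both the
# running maximum (out_ptr0) and its argmax (out_ptr1, the Viterbi
# backpointer), breaking ties toward the lower tag index. This is one step of
# (v.unsqueeze(-1) + transitions).max(1) with the start scores folded in.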
# kernel path: runs/run_shard_9/inductor_cache/dy/cdy673rrkjiv3qikoqrlvr3x4djhltg5txuuelkcf3bbizc3xryu.py
# Topologically Sorted Source Nodes: [add_3, max_2], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_3 => add_3
# max_2 => max_2
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_3, %unsqueeze_1), kwargs = {})
# %max_2 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_3, 1), kwargs = {})
triton_poi_fused_add_max_1 = async_compile.triton('triton_poi_fused_add_max_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + (16*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (5 + (16*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (6 + (16*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (7 + (16*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tmp64 = tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + (x2), tmp22, xmask)
tl.store(out_ptr1 + (x2), tmp65, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/y6/cy6aidx4xjpi7w6qjmhugi7jyuzpadheqvm6f3ovuoln3kiyejvo.py
# Topologically Sorted Source Nodes: [add_5, max_3], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_5 => add_5
# max_3 => max_3
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_4, %unsqueeze_1), kwargs = {})
# %max_3 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_5, 1), kwargs = {})
triton_poi_fused_add_max_2 = async_compile.triton('triton_poi_fused_add_max_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + (16*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (9 + (16*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (10 + (16*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (11 + (16*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tmp64 = tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + (x2), tmp22, xmask)
tl.store(out_ptr1 + (x2), tmp65, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/pz/cpzuxuwhwbn22vaq3ndvkouqpztabhutjiktjp3d5vxwx3ptniiy.py
# Topologically Sorted Source Nodes: [v_6, add_7, max_4, tag_1, tag_2, tag_3], Original ATen: [aten.add, aten.max, aten.gather]
# Source node to ATen node mapping:
# add_7 => add_7
# max_4 => max_4
# tag_1 => gather
# tag_2 => gather_1
# tag_3 => gather_2
# v_6 => add_6
# Graph fragment:
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, %select_3), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %unsqueeze_5), kwargs = {})
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_7, 1, True), kwargs = {})
# %gather : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_5, 1, %getitem_7), kwargs = {})
# %gather_1 : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_3, 1, %gather), kwargs = {})
# %gather_2 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_1, 1, %gather_1), kwargs = {})
triton_poi_fused_add_gather_max_3 = async_compile.triton('triton_poi_fused_add_gather_max_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i64', 4: '*i64', 5: '*i64', 6: '*i64', 7: '*i64', 8: '*i64', 9: '*i64', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gather_max_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (1))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp27 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (2))
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp47 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr1 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr2 + (3))
tmp51 = tl.broadcast_to(tmp50, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 + tmp4
tmp8 = tmp6 + tmp7
tmp11 = tmp8 + tmp10
tmp12 = tmp5 > tmp11
tmp13 = tmp5 == tmp11
tmp14 = tmp5 != tmp5
tmp15 = tmp11 != tmp11
tmp16 = tmp14 > tmp15
tmp17 = tmp12 | tmp16
tmp18 = tmp14 & tmp15
tmp19 = tmp13 | tmp18
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.full([1], 1, tl.int64)
tmp22 = tmp20 < tmp21
tmp23 = tmp19 & tmp22
tmp24 = tmp17 | tmp23
tmp25 = tl.where(tmp24, tmp5, tmp11)
tmp26 = tl.where(tmp24, tmp20, tmp21)
tmp29 = tmp27 + tmp28
tmp32 = tmp29 + tmp31
tmp33 = tmp25 > tmp32
tmp34 = tmp25 == tmp32
tmp35 = tmp25 != tmp25
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 2, tl.int64)
tmp42 = tmp26 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp25, tmp32)
tmp46 = tl.where(tmp44, tmp26, tmp41)
tmp49 = tmp47 + tmp48
tmp52 = tmp49 + tmp51
tmp53 = tmp45 > tmp52
tmp54 = tmp45 == tmp52
tmp55 = tmp45 != tmp45
tmp56 = tmp52 != tmp52
tmp57 = tmp55 > tmp56
tmp58 = tmp53 | tmp57
tmp59 = tmp55 & tmp56
tmp60 = tmp54 | tmp59
tmp61 = tl.full([1], 3, tl.int64)
tmp62 = tmp46 < tmp61
tmp63 = tmp60 & tmp62
tmp64 = tmp58 | tmp63
tmp65 = tl.where(tmp64, tmp45, tmp52)
tmp66 = tl.where(tmp64, tmp46, tmp61)
tmp67 = tl.full([XBLOCK], 4, tl.int32)
tmp68 = tmp66 + tmp67
tmp69 = tmp66 < 0
tmp70 = tl.where(tmp69, tmp68, tmp66)
tl.device_assert(((0 <= tmp70) & (tmp70 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp70 < 4")
tmp72 = tl.load(in_ptr3 + (tmp70 + (4*x0)), xmask, eviction_policy='evict_last')
tmp73 = tmp72 + tmp67
tmp74 = tmp72 < 0
tmp75 = tl.where(tmp74, tmp73, tmp72)
tl.device_assert(((0 <= tmp75) & (tmp75 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp75 < 4")
tmp77 = tl.load(in_ptr4 + (tmp75 + (4*x0)), xmask, eviction_policy='evict_last')
tmp78 = tmp77 + tmp67
tmp79 = tmp77 < 0
tmp80 = tl.where(tmp79, tmp78, tmp77)
tl.device_assert(((0 <= tmp80) & (tmp80 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp80 < 4")
tmp82 = tl.load(in_ptr5 + (tmp80 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (4*x0), tmp66, xmask)
tl.store(out_ptr1 + (4*x0), tmp82, xmask)
tl.store(out_ptr2 + (4*x0), tmp77, xmask)
tl.store(out_ptr3 + (4*x0), tmp72, xmask)
''', device_str='cuda')
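# This kernel fuses the final Viterbi step with the backtrace: it maximizes
# v + stop_transitions to pick the last tag (out_ptr0, column 3 of the output),
# then gathers through the three stored backpointer tables (in_ptr3..in_ptr5)
# to recover the tags for steps 2, 1 and 0 (out_ptr3, out_ptr2, out_ptr1).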
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_1, max_1], Original ATen: [aten.add, aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_add_max_0.run(arg0_1, arg1_1, arg2_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_3, max_2], Original ATen: [aten.add, aten.max]
triton_poi_fused_add_max_1.run(buf0, arg0_1, arg2_1, buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = buf0; del buf0 # reuse
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_5, max_3], Original ATen: [aten.add, aten.max]
triton_poi_fused_add_max_2.run(buf2, arg0_1, arg2_1, buf4, buf5, 16, grid=grid(16), stream=stream0)
del arg2_1
del buf2
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf6 = reinterpret_tensor(buf10, (4, 1), (4, 1), 3) # alias
buf7 = reinterpret_tensor(buf10, (4, 1), (4, 1), 0) # alias
buf8 = reinterpret_tensor(buf10, (4, 1), (4, 1), 1) # alias
buf9 = reinterpret_tensor(buf10, (4, 1), (4, 1), 2) # alias
# Topologically Sorted Source Nodes: [v_6, add_7, max_4, tag_1, tag_2, tag_3], Original ATen: [aten.add, aten.max, aten.gather]
triton_poi_fused_add_gather_max_3.run(buf4, arg0_1, arg3_1, buf5, buf3, buf1, buf6, buf7, buf8, buf9, 4, grid=grid(4), stream=stream0)
del arg0_1
del arg3_1
del buf1
del buf3
del buf4
del buf5
return (buf10, )
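# buf10 packs the four per-step tag columns written by the fused kernel above
# (buf6..buf9 alias its columns), so call() returns the best tag sequence of
# shape (batch=4, seq=4).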
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d, got {}-d'.format(len(feats.shape)))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
Computes negative log likelihood between features and tags.
        Essentially the difference between the individual sequence score and
        the log-sum of all possible sequence scores (the partition function)
Parameters:
feats: Input features [batch size, sequence length, number of tags]
        tags: Target tag indices [batch size, sequence length]. Should be between
            0 and num_tags - 1
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d, got {}-d'.format(len(feats.shape)))
if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
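    # The loss above is the standard CRF negative log-likelihood: the gold-path
    # score minus log Z (the partition function), averaged over the batch and
    # negated.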
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
        tags: Target tag indices [batch size, sequence length]. Should be between
            0 and num_tags - 1
Returns: Sequence score of shape [batch size]
"""
        feats.shape[0]  # no-op; the batch size is computed but unused
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
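    # Transition lookup sketch (illustrative values, not from the repo):
    # for tags = [[0, 2, 1]], unfold gives the pairs (0, 2) and (2, 1), so
    # trans_score sums transitions[0, 2] + transitions[2, 1] per batch item.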
def _partition_function(self, feats):
"""
        Computes the partition function for CRF using the forward algorithm,
        i.e. aggregates the scores of all possible tag sequences for the
        given feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(self.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
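    # Recursion note: with a[:, j] holding the log-score of all prefixes
    # ending in tag j, each step computes
    #   a'[:, j] = logsumexp_i(a[:, i] + transitions[i, j] + feat[:, j]),
    # which is exactly what the broadcasted sum above feeds to _log_sum_exp.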
def _viterbi(self, feats):
"""
Uses Viterbi algorithm to predict the best sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(self.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
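    # Backtracking note: paths[t][b, j] stores the best previous tag for a
    # path ending in tag j at step t + 1, so gathering with the current best
    # tag walks the optimal sequence from the last step back to the first.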
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
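# Hedged usage sketch (hypothetical inputs, not part of the original repo):
#   crf = CRF(num_tags=4)
#   feats = torch.randn(2, 5, 4)            # [batch, seq_len, num_tags]
#   tags = torch.randint(0, 4, (2, 5))      # [batch, seq_len]
#   best_paths = crf(feats)                 # Viterbi decode -> shape [2, 5]
#   nll = crf.loss(feats, tags)             # scalar negative log likelihood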
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_tags': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr1 + 3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = triton_helpers.maximum(tmp5, tmp11)
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp12, tmp18)
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = triton_helpers.maximum(tmp19, tmp25)
tmp27 = tmp5 > tmp11
tmp28 = tmp5 == tmp11
tmp29 = tmp5 != tmp5
tmp30 = tmp11 != tmp11
tmp31 = tmp29 > tmp30
tmp32 = tmp27 | tmp31
tmp33 = tmp29 & tmp30
tmp34 = tmp28 | tmp33
tmp35 = tl.full([1], 0, tl.int64)
tmp36 = tl.full([1], 1, tl.int64)
tmp37 = tmp35 < tmp36
tmp38 = tmp34 & tmp37
tmp39 = tmp32 | tmp38
tmp40 = tl.where(tmp39, tmp5, tmp11)
tmp41 = tl.where(tmp39, tmp35, tmp36)
tmp42 = tmp40 > tmp18
tmp43 = tmp40 == tmp18
tmp44 = tmp40 != tmp40
tmp45 = tmp18 != tmp18
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp41 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp40, tmp18)
tmp55 = tl.where(tmp53, tmp41, tmp50)
tmp56 = tmp54 > tmp25
tmp57 = tmp54 == tmp25
tmp58 = tmp54 != tmp54
tmp59 = tmp25 != tmp25
tmp60 = tmp58 > tmp59
tmp61 = tmp56 | tmp60
tmp62 = tmp58 & tmp59
tmp63 = tmp57 | tmp62
tmp64 = tl.full([1], 3, tl.int64)
tmp65 = tmp55 < tmp64
tmp66 = tmp63 & tmp65
tmp67 = tmp61 | tmp66
tl.where(tmp67, tmp54, tmp25)
tmp69 = tl.where(tmp67, tmp55, tmp64)
tl.store(out_ptr0 + x2, tmp26, xmask)
tl.store(out_ptr1 + x2, tmp69, xmask)
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + x2, tmp22, xmask)
tl.store(out_ptr1 + x2, tmp65, xmask)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + x2, tmp22, xmask)
tl.store(out_ptr1 + x2, tmp65, xmask)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr2 + 1)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp27 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr2 + 2)
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp47 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp48 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp50 = tl.load(in_ptr2 + 3)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 + tmp4
tmp8 = tmp6 + tmp7
tmp11 = tmp8 + tmp10
tmp12 = tmp5 > tmp11
tmp13 = tmp5 == tmp11
tmp14 = tmp5 != tmp5
tmp15 = tmp11 != tmp11
tmp16 = tmp14 > tmp15
tmp17 = tmp12 | tmp16
tmp18 = tmp14 & tmp15
tmp19 = tmp13 | tmp18
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.full([1], 1, tl.int64)
tmp22 = tmp20 < tmp21
tmp23 = tmp19 & tmp22
tmp24 = tmp17 | tmp23
tmp25 = tl.where(tmp24, tmp5, tmp11)
tmp26 = tl.where(tmp24, tmp20, tmp21)
tmp29 = tmp27 + tmp28
tmp32 = tmp29 + tmp31
tmp33 = tmp25 > tmp32
tmp34 = tmp25 == tmp32
tmp35 = tmp25 != tmp25
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 2, tl.int64)
tmp42 = tmp26 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp25, tmp32)
tmp46 = tl.where(tmp44, tmp26, tmp41)
tmp49 = tmp47 + tmp48
tmp52 = tmp49 + tmp51
tmp53 = tmp45 > tmp52
tmp54 = tmp45 == tmp52
tmp55 = tmp45 != tmp45
tmp56 = tmp52 != tmp52
tmp57 = tmp55 > tmp56
tmp58 = tmp53 | tmp57
tmp59 = tmp55 & tmp56
tmp60 = tmp54 | tmp59
tmp61 = tl.full([1], 3, tl.int64)
tmp62 = tmp46 < tmp61
tmp63 = tmp60 & tmp62
tmp64 = tmp58 | tmp63
tl.where(tmp64, tmp45, tmp52)
tmp66 = tl.where(tmp64, tmp46, tmp61)
tmp67 = tl.full([XBLOCK], 4, tl.int32)
tmp68 = tmp66 + tmp67
tmp69 = tmp66 < 0
tmp70 = tl.where(tmp69, tmp68, tmp66)
tl.device_assert((0 <= tmp70) & (tmp70 < 4) | ~xmask,
'index out of bounds: 0 <= tmp70 < 4')
tmp72 = tl.load(in_ptr3 + (tmp70 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp73 = tmp72 + tmp67
tmp74 = tmp72 < 0
tmp75 = tl.where(tmp74, tmp73, tmp72)
tl.device_assert((0 <= tmp75) & (tmp75 < 4) | ~xmask,
'index out of bounds: 0 <= tmp75 < 4')
tmp77 = tl.load(in_ptr4 + (tmp75 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp78 = tmp77 + tmp67
tmp79 = tmp77 < 0
tmp80 = tl.where(tmp79, tmp78, tmp77)
tl.device_assert((0 <= tmp80) & (tmp80 < 4) | ~xmask,
'index out of bounds: 0 <= tmp80 < 4')
tmp82 = tl.load(in_ptr5 + (tmp80 + 4 * x0), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + 4 * x0, tmp66, xmask)
tl.store(out_ptr1 + 4 * x0, tmp82, xmask)
tl.store(out_ptr2 + 4 * x0, tmp77, xmask)
tl.store(out_ptr3 + 4 * x0, tmp72, xmask)
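# Note: this fused kernel performs the final (v + stop_transitions).max step of
# Viterbi and the three gather-based backtracking steps in one pass; the four
# out_ptrs are column views into a single (4, 4) tag buffer (buf10 below).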
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_add_max_0[grid(16)](arg0_1, arg1_1, arg2_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_1[grid(16)](buf0, arg0_1, arg2_1, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = buf0
del buf0
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_2[grid(16)](buf2, arg0_1, arg2_1, buf4,
buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg2_1
del buf2
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf6 = reinterpret_tensor(buf10, (4, 1), (4, 1), 3)
buf7 = reinterpret_tensor(buf10, (4, 1), (4, 1), 0)
buf8 = reinterpret_tensor(buf10, (4, 1), (4, 1), 1)
buf9 = reinterpret_tensor(buf10, (4, 1), (4, 1), 2)
triton_poi_fused_add_gather_max_3[grid(4)](buf4, arg0_1, arg3_1,
buf5, buf3, buf1, buf6, buf7, buf8, buf9, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del arg0_1
del arg3_1
del buf1
del buf3
del buf4
del buf5
return buf10,
class CRFNew(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRFNew, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def loss(self, feats, tags):
"""
Computes negative log likelihood between features and tags.
        Essentially the difference between the individual sequence scores and
        the sum of all possible sequence scores (the partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
            0 and num_tags - 1
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
            0 and num_tags - 1
Returns: Sequence score of shape [batch size]
"""
        feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the partition function for CRF using the forward algorithm,
        i.e. aggregates the scores of all possible tag sequences for the
        given feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(self.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
Uses Viterbi algorithm to predict the best sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(self.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
def forward(self, input_0):
arg2_1 = self.transitions
arg1_1 = self.start_transitions
arg3_1 = self.stop_transitions
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| yezhengli-Mr9/torchnlp | CRF | false | 13148 | ["Apache-2.0"] | 0 | 0f2ad6d149a413da9f03c6f6694c429746de6551 | https://github.com/yezhengli-Mr9/torchnlp/tree/0f2ad6d149a413da9f03c6f6694c429746de6551 |
ScaledDotAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/hg/chg3iq6bscxmmxv5f7tuzgwycb4mgrimwfhv2nauw5rj4tt5cmv2.py
# Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weights_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/zu/czuvep3dmpmqmhiiliwubh4ghdt2qr27va67sszkua7trziinwov.py
# Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weights_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(primals_4, buf2, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(primals_7, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4), (16, 4, 1))
buf6 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf6, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf7 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf7, primals_6, 64, grid=grid(64), stream=stream0)
del primals_6
buf8 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 1, 4), 0), buf7, out=buf8)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf11, primals_9, 64, grid=grid(64), stream=stream0)
del primals_9
buf12 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm]
extern_kernels.bmm(buf10, reinterpret_tensor(buf11, (4, 4, 4), (16, 1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf12, buf13, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4), (16, 4, 1))
del buf13
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf15, primals_11, 64, grid=grid(64), stream=stream0)
del primals_11
return (reinterpret_tensor(buf15, (4, 4, 4), (16, 1, 4), 0), buf10, primals_2, primals_5, primals_8, primals_10, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_7, (4, 4, 4), (16, 1, 4), 0), buf10, reinterpret_tensor(buf12, (4, 4, 4), (16, 1, 4), 0), buf11, buf6, reinterpret_tensor(buf7, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import LayerNorm
def scaled_dot_attention(q, k, v, mask=None, noise=0, dropout=lambda x: x):
"""
:param q: queries, (batch, time1, channels1)
:param k: keys, (batch, time2, channels1)
:param v: values, (batch, time2, channels2)
:param mask: boolean mask, (batch, time1, time2)
    :param noise: scale of the Gaussian noise added to the attention logits (0 disables it)
    :param dropout: a dropout function; passing it in keeps dropout as a module, giving better control over training/eval behaviour
:return: (batch, time1, channels2), (batch, time1, time2)
"""
weights = torch.matmul(q, k.transpose(2, 1))
if mask is not None:
weights = weights.masked_fill(~mask, float('-inf'))
if noise:
weights += noise * torch.randn(weights.shape)
weights = torch.softmax(weights, dim=-1)
weights = dropout(weights)
result = torch.matmul(weights, v)
return result, weights
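# Shape sanity check (hypothetical tensors; no mask, noise, or dropout):
#   q = torch.rand(2, 3, 8); k = torch.rand(2, 5, 8); v = torch.rand(2, 5, 16)
#   out, w = scaled_dot_attention(q, k, v)  # out: (2, 3, 16), w: (2, 3, 5)
# Despite the name, the weights are a plain softmax over q @ k^T; no
# 1/sqrt(d) scaling factor is applied here.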
def mask(x, lengths, dim=-1):
assert dim != 0, 'Masking not available for batch dimension'
    assert len(lengths) == x.shape[0], 'Lengths must contain as many elements as there are items in the batch'
lengths = torch.as_tensor(lengths)
to_expand = [1] * (x.ndim - 1) + [-1]
    mask = torch.arange(x.shape[dim]).expand(to_expand).transpose(dim, -1).expand(x.shape)
mask = mask < lengths.expand(to_expand).transpose(0, -1)
return mask
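# Example (hypothetical): mask(torch.zeros(2, 4), lengths=[2, 4]) returns
#   [[True, True, False, False],
#    [True, True, True,  True ]]
# where True marks valid (unpadded) positions along the last dimension.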
class Conv1d(nn.Conv1d):
"""A wrapper around nn.Conv1d, that works on (batch, time, channels)"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
dilation=1, groups=1, bias=True, padding=0):
super(Conv1d, self).__init__(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, dilation=
dilation, groups=groups, bias=bias, padding=padding)
def forward(self, x):
return super().forward(x.transpose(2, 1)).transpose(2, 1)
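# Design note: with the default kernel_size=1 this wrapper acts as a
# per-timestep linear projection over channels, applied to
# (batch, time, channels) tensors by transposing around nn.Conv1d.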
class ScaledDotAttention(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, noise=0,
normalize=False, dropout=False):
super(ScaledDotAttention, self).__init__()
self.noise = noise
self.dropout = torch.nn.Dropout(p=dropout)
self.normalize = normalize
self.fc_query = Conv1d(in_channels, hidden_channels)
self.fc_keys = Conv1d(in_channels, hidden_channels)
if normalize:
self.qnorm = LayerNorm(in_channels)
self.knorm = LayerNorm(in_channels)
self.fc_keys.weight = torch.nn.Parameter(self.fc_query.weight.clone())
self.fc_keys.bias = torch.nn.Parameter(self.fc_query.bias.clone())
self.fc_values = Conv1d(in_channels, hidden_channels)
self.fc_out = Conv1d(hidden_channels, out_channels)
def forward(self, q, k, v, mask=None):
"""
:param q: queries, (batch, time1, channels1)
:param k: keys, (batch, time2, channels1)
:param v: values, (batch, time2, channels2)
:param mask: boolean mask, (batch, time1, time2)
:return: (batch, time1, channels2), (batch, time1, time2)
"""
noise = self.noise if self.training else 0
if self.normalize:
q = self.qnorm(q)
k = self.knorm(k)
alignment, weights = scaled_dot_attention(self.fc_query(q), self.
fc_keys(k), self.fc_values(v), mask, noise=noise, dropout=self.
dropout)
alignment = self.fc_out(alignment)
return alignment, weights
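# Hedged usage sketch (hypothetical inputs, not part of the original repo):
#   attn = ScaledDotAttention(in_channels=4, hidden_channels=4, out_channels=4)
#   q = k = v = torch.rand(4, 4, 4)
#   out, weights = attn(q, k, v)  # out: (4, 4, 4), weights: (4, 4, 4)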
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'hidden_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import LayerNorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
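# Note: the two softmax kernels above are the usual numerically stable split:
# triton_poi_fused__softmax_2 subtracts the per-row max and exponentiates,
# triton_poi_fused__softmax_3 divides by the per-row sum, over rows of 4 logits.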
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf0
del buf0
triton_poi_fused_convolution_0[grid(16, 4)](primals_4, buf2, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf2
del buf2
triton_poi_fused_convolution_0[grid(16, 4)](primals_7, buf4, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_8, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4), (16, 4, 1))
buf6 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf6, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf7 = buf3
del buf3
triton_poi_fused_convolution_1[grid(64)](buf7, primals_6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_6
buf8 = buf4
del buf4
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 1, 4),
0), buf7, out=buf8)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = buf8
del buf8
triton_poi_fused__softmax_3[grid(64)](buf9, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = buf5
del buf5
triton_poi_fused_convolution_1[grid(64)](buf11, primals_9, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf12 = buf9
del buf9
extern_kernels.bmm(buf10, reinterpret_tensor(buf11, (4, 4, 4), (16,
1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_convolution_0[grid(16, 4)](buf12, buf13, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4), (16, 4, 1))
del buf13
buf15 = buf14
del buf14
triton_poi_fused_convolution_1[grid(64)](buf15, primals_11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
return reinterpret_tensor(buf15, (4, 4, 4), (16, 1, 4), 0
), buf10, primals_2, primals_5, primals_8, primals_10, reinterpret_tensor(
primals_1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_4,
(4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_7, (4, 4, 4),
(16, 1, 4), 0), buf10, reinterpret_tensor(buf12, (4, 4, 4), (16, 1,
4), 0), buf11, buf6, reinterpret_tensor(buf7, (4, 4, 4), (16, 1, 4), 0)
def scaled_dot_attention(q, k, v, mask=None, noise=0, dropout=lambda x: x):
"""
:param q: queries, (batch, time1, channels1)
:param k: keys, (batch, time2, channels1)
:param v: values, (batch, time2, channels2)
:param mask: boolean mask, (batch, time1, time2)
    :param noise: scale of the Gaussian noise added to the attention logits (0 disables it)
    :param dropout: a dropout function; passing it in keeps dropout as a module, giving better control over training/eval behaviour
:return: (batch, time1, channels2), (batch, time1, time2)
"""
weights = torch.matmul(q, k.transpose(2, 1))
if mask is not None:
weights = weights.masked_fill(~mask, float('-inf'))
if noise:
weights += noise * torch.randn(weights.shape)
weights = torch.softmax(weights, dim=-1)
weights = dropout(weights)
result = torch.matmul(weights, v)
return result, weights
def mask(x, lengths, dim=-1):
assert dim != 0, 'Masking not available for batch dimension'
    assert len(lengths) == x.shape[0], 'Lengths must contain as many elements as there are items in the batch'
lengths = torch.as_tensor(lengths)
to_expand = [1] * (x.ndim - 1) + [-1]
    mask = torch.arange(x.shape[dim]).expand(to_expand).transpose(dim, -1).expand(x.shape)
mask = mask < lengths.expand(to_expand).transpose(0, -1)
return mask
class Conv1d(nn.Conv1d):
"""A wrapper around nn.Conv1d, that works on (batch, time, channels)"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
dilation=1, groups=1, bias=True, padding=0):
super(Conv1d, self).__init__(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, dilation=
dilation, groups=groups, bias=bias, padding=padding)
def forward(self, x):
return super().forward(x.transpose(2, 1)).transpose(2, 1)
class ScaledDotAttentionNew(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, noise=0,
normalize=False, dropout=False):
super(ScaledDotAttentionNew, self).__init__()
self.noise = noise
self.dropout = torch.nn.Dropout(p=dropout)
self.normalize = normalize
self.fc_query = Conv1d(in_channels, hidden_channels)
self.fc_keys = Conv1d(in_channels, hidden_channels)
if normalize:
self.qnorm = LayerNorm(in_channels)
self.knorm = LayerNorm(in_channels)
self.fc_keys.weight = torch.nn.Parameter(self.fc_query.weight.clone())
self.fc_keys.bias = torch.nn.Parameter(self.fc_query.bias.clone())
self.fc_values = Conv1d(in_channels, hidden_channels)
self.fc_out = Conv1d(hidden_channels, out_channels)
def forward(self, input_0, input_1, input_2):
primals_2 = self.fc_query.weight
primals_3 = self.fc_query.bias
primals_5 = self.fc_keys.weight
primals_6 = self.fc_keys.bias
primals_8 = self.fc_values.weight
primals_9 = self.fc_values.bias
primals_10 = self.fc_out.weight
primals_11 = self.fc_out.bias
primals_1 = input_0
primals_4 = input_1
primals_7 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
| yhgon/speedyspeech | ScaledDotAttention | false | 13149 | ["BSD-3-Clause"] | 0 | 574c6a94091431f313e2aae8e154b8c80e6908ce | https://github.com/yhgon/speedyspeech/tree/574c6a94091431f313e2aae8e154b8c80e6908ce |
CoPredictor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/kf/ckftf6miq75hbgw2rf2vbkft5gpenqdeq5wwyuysab22y6rrdwxp.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_6 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_2, %full_default], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = 0.5
tmp7 = tmp5 * tmp6
tmp8 = 0.7071067811865476
tmp9 = tmp5 * tmp8
tmp10 = libdevice.erf(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp7 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 5, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp20 = tl.where(tmp16, tmp11, tmp19)
tmp21 = tl.where(tmp4, tmp15, tmp20)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
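# Note: tmp7 * tmp12 above realizes gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
# (0.7071067811865476 is 1/sqrt(2)), and the cat appends a constant 1.0 column,
# a common bias-augmentation trick ahead of a biaffine-style scorer.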
# kernel path: runs/run_shard_9/inductor_cache/ob/cobndnzvyx4riiadkixqniohayrctuojnqgblko2n5472f5v7cg2.py
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# s => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 100
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5) % 4
x2 = (xindex // 20)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (5*x2) + (25*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/7a/c7ab5hnsm4wwlypalhxe7gcqhidpiximafne5b56brtso52swhhv.py
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# s => clone_5
# Graph fragment:
# %clone_5 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_9,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 20
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = (xindex // 4)
y0 = yindex % 5
y1 = (yindex // 5)
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (5*x3) + (20*x2) + (80*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5 + (16*y4)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/n4/cn4s3wdzzwdkscybngjo7s35ervcckzpn4p6pc375xmiynduphin.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_9 => add_2, erf_2, mul_6, mul_7, mul_8
# Graph fragment:
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, 0.5), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, 0.7071067811865476), kwargs = {})
# %erf_2 : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_7,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf_2, 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %add_2), kwargs = {})
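# Note: the graph fragment above is the exact (erf-based) GELU that nn.GELU
# computes by default: gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), where
# 0.7071067811865476 = 1/sqrt(2). A minimal, illustrative PyTorch reference
# (not part of the generated module) would be:
#
#   def gelu_exact(x):
#       return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
#
# which should match torch.nn.functional.gelu(x) to float32 precision.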
triton_poi_fused_gelu_3 = async_compile.triton('triton_poi_fused_gelu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/aa/caa76b5pe7bxawfeuiyrgvh34uonkxgzq54uzjqkfpaqqis74mja.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add_3
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_11, %view_14), kwargs = {})
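# Note: this kernel fuses two additions into one pass: the final linear
# layer's bias (in_ptr1) is added to the matmul output (in_ptr0), and the sum
# is accumulated in place onto the biaffine scores already resident in
# in_out_ptr0 (i.e. o1 + o2 in the source module).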
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x5 = (xindex // 4) % 16
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 5, 5), (25, 5, 1))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, buf2, 80, grid=grid(80), stream=stream0)
buf3 = empty_strided_cuda((5, 4, 1, 5, 1, 1), (20, 5, 5, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_7, buf3, 100, grid=grid(100), stream=stream0)
del primals_7
buf4 = empty_strided_cuda((1, 16, 20), (320, 20, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 5), (0, 5, 1), 0), reinterpret_tensor(buf3, (1, 5, 20), (0, 20, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf1, buf5, 80, grid=grid(80), stream=stream0)
buf6 = empty_strided_cuda((4, 5, 4, 4, 1, 1), (80, 16, 4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf4, buf6, 20, 16, grid=grid(20, 16), stream=stream0)
del buf4
buf7 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf6, (4, 5, 16), (80, 16, 1), 0), out=buf7)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_10
del primals_9
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.gelu]
triton_poi_fused_gelu_3.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf10)
buf11 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 1, 16, 4), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf11, buf10, primals_12, 256, grid=grid(256), stream=stream0)
del buf10
del primals_12
return (buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), buf1, reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), primals_11, reinterpret_tensor(buf5, (4, 5, 4), (20, 1, 5), 0), reinterpret_tensor(buf6, (4, 16, 5), (80, 1, 16), 0), reinterpret_tensor(buf2, (1, 5, 16), (80, 1, 5), 0), reinterpret_tensor(buf3, (1, 20, 5), (100, 1, 20), 0), )
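# Reading guide for call() above (descriptive only): buf0/buf1 are the mlp1
# and mlp2 linear projections, the fused cat kernel applies GELU and appends
# the constant bias column to form buf2/buf5, the two bmm calls realize the
# biaffine einsum 'bxi,oij,byj->boxy' as two contractions (buf4, buf7),
# buf8-buf10 run mlp_rel -> GELU -> linear, and buf11 holds o1 + o2.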
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 5, 5), (25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.autograd
import torch.nn as nn
class Biaffine(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(Biaffine, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
nn.init.xavier_normal_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.permute(0, 2, 3, 1)
return s
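# Note: the einsum 'bxi,oij,byj->boxy' above is what the compiled call()
# lowers to two bmm contractions; an equivalent two-step form (illustrative)
# is t = torch.einsum('bxi,oij->boxj', x, weight) followed by
# s = torch.einsum('boxj,byj->boxy', t, y).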
class MLP(nn.Module):
def __init__(self, n_in, n_out, dropout=0):
super().__init__()
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.GELU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout(x)
x = self.linear(x)
x = self.activation(x)
return x
class CoPredictor(nn.Module):
def __init__(self, cls_num, hid_size, biaffine_size, channels,
ffnn_hid_size, dropout=0):
super().__init__()
self.mlp1 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
self.mlp2 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
self.biaffine = Biaffine(n_in=biaffine_size, n_out=cls_num, bias_x=
True, bias_y=True)
self.mlp_rel = MLP(channels, ffnn_hid_size, dropout=dropout)
self.linear = nn.Linear(ffnn_hid_size, cls_num)
self.dropout = nn.Dropout(dropout)
def forward(self, x, y, z):
ent_sub = self.dropout(self.mlp1(x))
ent_obj = self.dropout(self.mlp2(y))
o1 = self.biaffine(ent_sub, ent_obj)
z = self.dropout(self.mlp_rel(z))
o2 = self.linear(z)
return o1 + o2
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'cls_num': 4, 'hid_size': 4, 'biaffine_size': 4,
'channels': 4, 'ffnn_hid_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = 0.5
tmp7 = tmp5 * tmp6
tmp8 = 0.7071067811865476
tmp9 = tmp5 * tmp8
tmp10 = libdevice.erf(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp7 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp19 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp20 = tl.where(tmp16, tmp11, tmp19)
tmp21 = tl.where(tmp4, tmp15, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
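# (The unassigned tl.full(...) expressions in the kernel above appear to be
# no-ops left behind by dead-code elimination; they do not affect the stored
# result.)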
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 100
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5 % 4
x2 = xindex // 20
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 5 * x2 + 25 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 20
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = xindex // 4
y0 = yindex % 5
y1 = yindex // 5
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 5 * x3 + 20 * x2 + 80 * y1), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x5 = xindex // 4 % 16
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x4, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 5, 5), (25, 5, 1))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(primals_4, (16,
4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](buf0, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((5, 4, 1, 5, 1, 1), (20, 5, 5, 1, 1, 1),
torch.float32)
triton_poi_fused_clone_1[grid(100)](primals_7, buf3, 100, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf4 = empty_strided_cuda((1, 16, 20), (320, 20, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 5), (0, 5, 1),
0), reinterpret_tensor(buf3, (1, 5, 20), (0, 20, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](buf1, buf5, 80, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 5, 4, 4, 1, 1), (80, 16, 4, 1, 1, 1),
torch.float32)
triton_poi_fused_clone_2[grid(20, 16)](buf4, buf6, 20, 16, XBLOCK=
16, YBLOCK=32, num_warps=4, num_stages=1)
del buf4
buf7 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf6, (4, 5, 16), (80,
16, 1), 0), out=buf7)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_10, reinterpret_tensor(primals_8, (16,
4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf8)
del primals_10
del primals_9
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_3[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf10)
buf11 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 1, 16, 4), 0)
del buf7
triton_poi_fused_add_4[grid(256)](buf11, buf10, primals_12, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf10
del primals_12
return buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0
), buf1, reinterpret_tensor(primals_8, (16, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf9, (16, 4), (4, 1), 0
), primals_11, reinterpret_tensor(buf5, (4, 5, 4), (20, 1, 5), 0
), reinterpret_tensor(buf6, (4, 16, 5), (80, 1, 16), 0
), reinterpret_tensor(buf2, (1, 5, 16), (80, 1, 5), 0
), reinterpret_tensor(buf3, (1, 20, 5), (100, 1, 20), 0)
class Biaffine(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(Biaffine, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
nn.init.xavier_normal_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.permute(0, 2, 3, 1)
return s
class MLP(nn.Module):
def __init__(self, n_in, n_out, dropout=0):
super().__init__()
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.GELU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout(x)
x = self.linear(x)
x = self.activation(x)
return x
class CoPredictorNew(nn.Module):
def __init__(self, cls_num, hid_size, biaffine_size, channels,
ffnn_hid_size, dropout=0):
super().__init__()
self.mlp1 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
self.mlp2 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
self.biaffine = Biaffine(n_in=biaffine_size, n_out=cls_num, bias_x=
True, bias_y=True)
self.mlp_rel = MLP(channels, ffnn_hid_size, dropout=dropout)
self.linear = nn.Linear(ffnn_hid_size, cls_num)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1, input_2):
primals_2 = self.mlp1.linear.weight
primals_3 = self.mlp1.linear.bias
primals_5 = self.mlp2.linear.weight
primals_6 = self.mlp2.linear.bias
primals_7 = self.biaffine.weight
primals_9 = self.mlp_rel.linear.weight
primals_10 = self.mlp_rel.linear.bias
primals_11 = self.linear.weight
primals_12 = self.linear.bias
primals_1 = input_0
primals_4 = input_1
primals_8 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| yifding/W2NER | CoPredictor | false | 13,150 | ["MIT"] | 0 | d13128e45f3930a8b8faa794318939dc90a75974 | https://github.com/yifding/W2NER/tree/d13128e45f3930a8b8faa794318939dc90a75974 |
CRFOutputLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/dw/cdwmqu743nczlvrj7tkipbzih5kt2cf52yjyk66x6qqwoxzqkwcu.py
# Topologically Sorted Source Nodes: [add_1, max_1], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_1 => add_1
# max_1 => max_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_2, %unsqueeze_1), kwargs = {})
# %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_1, 1), kwargs = {})
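# Note: this fused kernel is one unrolled Viterbi step for the 4-tag case.
# For each (batch, next_tag) it maximizes over the four previous tags of
# (running score + transition), with the projected emissions, linear bias,
# and start transitions folded in; it writes the max value (out_ptr0) and its
# argmax backpointer (out_ptr1). The NaN and tie comparisons below reproduce
# torch.max's index semantics.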
triton_poi_fused_add_max_0 = async_compile.triton('triton_poi_fused_add_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (16*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr2 + (1))
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp16 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + (16*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (2))
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + (2))
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp26 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (3 + (16*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr1 + (3))
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp33 = tl.load(in_ptr2 + (3))
tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
tmp36 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp12 = tmp9 + tmp11
tmp15 = tmp12 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp22 = tmp19 + tmp21
tmp25 = tmp22 + tmp24
tmp27 = tmp25 + tmp26
tmp28 = triton_helpers.maximum(tmp18, tmp27)
tmp32 = tmp29 + tmp31
tmp35 = tmp32 + tmp34
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp28, tmp37)
tmp39 = tmp8 > tmp17
tmp40 = tmp8 == tmp17
tmp41 = tmp8 != tmp8
tmp42 = tmp17 != tmp17
tmp43 = tmp41 > tmp42
tmp44 = tmp39 | tmp43
tmp45 = tmp41 & tmp42
tmp46 = tmp40 | tmp45
tmp47 = tl.full([1], 0, tl.int64)
tmp48 = tl.full([1], 1, tl.int64)
tmp49 = tmp47 < tmp48
tmp50 = tmp46 & tmp49
tmp51 = tmp44 | tmp50
tmp52 = tl.where(tmp51, tmp8, tmp17)
tmp53 = tl.where(tmp51, tmp47, tmp48)
tmp54 = tmp52 > tmp27
tmp55 = tmp52 == tmp27
tmp56 = tmp52 != tmp52
tmp57 = tmp27 != tmp27
tmp58 = tmp56 > tmp57
tmp59 = tmp54 | tmp58
tmp60 = tmp56 & tmp57
tmp61 = tmp55 | tmp60
tmp62 = tl.full([1], 2, tl.int64)
tmp63 = tmp53 < tmp62
tmp64 = tmp61 & tmp63
tmp65 = tmp59 | tmp64
tmp66 = tl.where(tmp65, tmp52, tmp27)
tmp67 = tl.where(tmp65, tmp53, tmp62)
tmp68 = tmp66 > tmp37
tmp69 = tmp66 == tmp37
tmp70 = tmp66 != tmp66
tmp71 = tmp37 != tmp37
tmp72 = tmp70 > tmp71
tmp73 = tmp68 | tmp72
tmp74 = tmp70 & tmp71
tmp75 = tmp69 | tmp74
tmp76 = tl.full([1], 3, tl.int64)
tmp77 = tmp67 < tmp76
tmp78 = tmp75 & tmp77
tmp79 = tmp73 | tmp78
tmp80 = tl.where(tmp79, tmp66, tmp37)
tmp81 = tl.where(tmp79, tmp67, tmp76)
tl.store(out_ptr0 + (x2), tmp38, xmask)
tl.store(out_ptr1 + (x2), tmp81, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ho/cho3kyyvvlfzvcgnxvqbk6afqxqu2eee4bblrr2c4njpbeom6354.py
# Topologically Sorted Source Nodes: [add_3, max_2], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_3 => add_3
# max_2 => max_2
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_3, %unsqueeze_1), kwargs = {})
# %max_2 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_3, 1), kwargs = {})
triton_poi_fused_add_max_1 = async_compile.triton('triton_poi_fused_add_max_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + (16*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (5 + (16*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (6 + (16*x1)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (2))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (7 + (16*x1)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (3))
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tmp76 = tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + (x2), tmp34, xmask)
tl.store(out_ptr1 + (x2), tmp77, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/i2/ci2s3onbganqcbrkqgul24aydnipaukm6yf5o2l7jzdixgeapo6p.py
# Topologically Sorted Source Nodes: [add_5, max_3], Original ATen: [aten.add, aten.max]
# Source node to ATen node mapping:
# add_5 => add_5
# max_3 => max_3
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_4, %unsqueeze_1), kwargs = {})
# %max_3 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_5, 1), kwargs = {})
triton_poi_fused_add_max_2 = async_compile.triton('triton_poi_fused_add_max_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + (16*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (9 + (16*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (10 + (16*x1)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (2))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (11 + (16*x1)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (3))
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tmp76 = tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + (x2), tmp34, xmask)
tl.store(out_ptr1 + (x2), tmp77, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/cj/ccjggvguunan7bokqtiojzjz3sm3gwnc263uehtysgzmly66krb4.py
# Topologically Sorted Source Nodes: [v_6, add_7, max_4, tag_1, tag_2, tag_3], Original ATen: [aten.add, aten.max, aten.gather]
# Source node to ATen node mapping:
# add_7 => add_7
# max_4 => max_4
# tag_1 => gather
# tag_2 => gather_1
# tag_3 => gather_2
# v_6 => add_6
# Graph fragment:
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, %select_3), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %unsqueeze_5), kwargs = {})
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%add_7, 1, True), kwargs = {})
# %gather : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_5, 1, %getitem_7), kwargs = {})
# %gather_1 : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_3, 1, %gather), kwargs = {})
# %gather_2 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%getitem_1, 1, %gather_1), kwargs = {})
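# Note: this last fused kernel finishes the Viterbi decode: it adds the stop
# transitions, takes the argmax over the final tag (max_4), then chains three
# gathers through the stored backpointer tables to recover the best tag at
# each earlier step, writing the four per-step tags into aliased slices of a
# single output buffer.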
triton_poi_fused_add_gather_max_3 = async_compile.triton('triton_poi_fused_add_gather_max_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*i64', 6: '*i64', 7: '*i64', 8: '*i64', 9: '*i64', 10: '*i64', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gather_max_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (1))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr3 + (1))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp33 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr1 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (2))
tmp36 = tl.broadcast_to(tmp35, [XBLOCK])
tmp39 = tl.load(in_ptr3 + (2))
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp56 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr1 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr2 + (3))
tmp59 = tl.broadcast_to(tmp58, [XBLOCK])
tmp62 = tl.load(in_ptr3 + (3))
tmp63 = tl.broadcast_to(tmp62, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp8 = tmp5 + tmp7
tmp13 = tmp10 + tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp14 + tmp16
tmp18 = tmp8 > tmp17
tmp19 = tmp8 == tmp17
tmp20 = tmp8 != tmp8
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 0, tl.int64)
tmp27 = tl.full([1], 1, tl.int64)
tmp28 = tmp26 < tmp27
tmp29 = tmp25 & tmp28
tmp30 = tmp23 | tmp29
tmp31 = tl.where(tmp30, tmp8, tmp17)
tmp32 = tl.where(tmp30, tmp26, tmp27)
tmp37 = tmp34 + tmp36
tmp38 = tmp33 + tmp37
tmp41 = tmp38 + tmp40
tmp42 = tmp31 > tmp41
tmp43 = tmp31 == tmp41
tmp44 = tmp31 != tmp31
tmp45 = tmp41 != tmp41
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp32 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp31, tmp41)
tmp55 = tl.where(tmp53, tmp32, tmp50)
tmp60 = tmp57 + tmp59
tmp61 = tmp56 + tmp60
tmp64 = tmp61 + tmp63
tmp65 = tmp54 > tmp64
tmp66 = tmp54 == tmp64
tmp67 = tmp54 != tmp54
tmp68 = tmp64 != tmp64
tmp69 = tmp67 > tmp68
tmp70 = tmp65 | tmp69
tmp71 = tmp67 & tmp68
tmp72 = tmp66 | tmp71
tmp73 = tl.full([1], 3, tl.int64)
tmp74 = tmp55 < tmp73
tmp75 = tmp72 & tmp74
tmp76 = tmp70 | tmp75
tmp77 = tl.where(tmp76, tmp54, tmp64)
tmp78 = tl.where(tmp76, tmp55, tmp73)
tmp79 = tl.full([XBLOCK], 4, tl.int32)
tmp80 = tmp78 + tmp79
tmp81 = tmp78 < 0
tmp82 = tl.where(tmp81, tmp80, tmp78)
tl.device_assert(((0 <= tmp82) & (tmp82 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp82 < 4")
tmp84 = tl.load(in_ptr4 + (tmp82 + (4*x0)), xmask, eviction_policy='evict_last')
tmp85 = tmp84 + tmp79
tmp86 = tmp84 < 0
tmp87 = tl.where(tmp86, tmp85, tmp84)
tl.device_assert(((0 <= tmp87) & (tmp87 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp87 < 4")
tmp89 = tl.load(in_ptr5 + (tmp87 + (4*x0)), xmask, eviction_policy='evict_last')
tmp90 = tmp89 + tmp79
tmp91 = tmp89 < 0
tmp92 = tl.where(tmp91, tmp90, tmp89)
tl.device_assert(((0 <= tmp92) & (tmp92 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp92 < 4")
tmp94 = tl.load(in_ptr6 + (tmp92 + (4*x0)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (4*x0), tmp78, xmask)
tl.store(out_ptr1 + (4*x0), tmp94, xmask)
tl.store(out_ptr2 + (4*x0), tmp89, xmask)
tl.store(out_ptr3 + (4*x0), tmp84, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4, ), (1, ))
assert_size_stride(arg4_1, (4, 4), (4, 1))
assert_size_stride(arg5_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(arg2_1, (16, 4), (4, 1), 0), reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
del arg0_1
del arg2_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_1, max_1], Original ATen: [aten.add, aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_add_max_0.run(buf0, arg1_1, arg3_1, arg4_1, buf1, buf2, 16, grid=grid(16), stream=stream0)
del arg3_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_3, max_2], Original ATen: [aten.add, aten.max]
triton_poi_fused_add_max_1.run(buf1, buf0, arg1_1, arg4_1, buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = buf1; del buf1 # reuse
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add_5, max_3], Original ATen: [aten.add, aten.max]
triton_poi_fused_add_max_2.run(buf3, buf0, arg1_1, arg4_1, buf5, buf6, 16, grid=grid(16), stream=stream0)
del arg4_1
del buf3
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf7 = reinterpret_tensor(buf11, (4, 1), (4, 1), 3) # alias
buf8 = reinterpret_tensor(buf11, (4, 1), (4, 1), 0) # alias
buf9 = reinterpret_tensor(buf11, (4, 1), (4, 1), 1) # alias
buf10 = reinterpret_tensor(buf11, (4, 1), (4, 1), 2) # alias
# Topologically Sorted Source Nodes: [v_6, add_7, max_4, tag_1, tag_2, tag_3], Original ATen: [aten.add, aten.max, aten.gather]
triton_poi_fused_add_gather_max_3.run(buf5, buf0, arg1_1, arg5_1, buf6, buf4, buf2, buf7, buf8, buf9, buf10, 4, grid=grid(4), stream=stream0)
del arg1_1
del arg5_1
del buf0
del buf2
del buf4
del buf5
del buf6
return (buf11, )
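# Reading guide for call() above (descriptive only): buf0 is the projection
# of the hidden states onto tag logits, each triton_poi_fused_add_max_*
# kernel is one Viterbi step over the length-4 sequence (buf1/buf3/buf5 hold
# the running scores, buf2/buf4/buf6 the backpointers), and the final kernel
# backtracks and assembles the best tag sequence in buf11.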
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg5_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d got {}-d'.format(feats.shape))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
        Computes the negative log likelihood between features and tags:
        essentially the difference between the individual sequence score and
        the sum over all possible sequence scores (the partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d got {}-d'.format(feats.shape))
if len(tags.shape) != 2:
raise ValueError('tags must be 2-d but got {}-d'.format(tags.shape)
)
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns: Sequence score of shape [batch size]
"""
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
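    # Note for _sequence_score: tags.unfold(1, 2, 1) produces every adjacent
    # (tag_t, tag_{t+1}) pair, so self.transitions[indices] scores each
    # transition taken; the total is emissions + start + transitions + stop.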
def _partition_function(self, feats):
"""
        Computes the partition function for the CRF using the forward
        algorithm, i.e. the total score over all possible tag sequences
        for the given feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
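    # The loop above is the forward recurrence in log space:
    #   a_t[j] = logsumexp_i(a_{t-1}[i] + transitions[i, j]) + feats[:, t, j]
    # with a_0 = feats[:, 0] + start_transitions; the final partition value
    # is logsumexp_j(a_T[j] + stop_transitions[j]).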
def _viterbi(self, feats):
"""
        Uses the Viterbi algorithm to predict the best tag sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
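    # Backtracking note: paths[i-1][b, k] holds the best previous tag given
    # tag k at step i, so the reversed gather loop above reconstructs the
    # optimal path right to left; the whole decode runs in
    # O(seq_size * num_tags^2) time.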
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
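    # Stability example (illustrative): for logits = [1000., 1000.], a naive
    # logits.exp().sum().log() overflows to inf, while the max-shifted form
    # above returns 1000 + log(2) ~= 1000.6931.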
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
    Handles the projection to output labels.
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
raise NotImplementedError('Must implement {}.loss'.format(self.
__class__.__name__))
class CRFOutputLayer(OutputLayer):
"""
Implements a CRF based output layer
"""
def __init__(self, hidden_size, output_size):
super(CRFOutputLayer, self).__init__(hidden_size, output_size)
self.crf = CRF(output_size)
def forward(self, hidden):
feats = self.output_projection(hidden)
return self.crf(feats)
def loss(self, hidden, labels):
feats = self.output_projection(hidden)
return self.crf.loss(feats, labels)
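# Minimal usage sketch for CRFOutputLayer (illustrative only; shapes match
# get_inputs/get_init_inputs below):
#
#   layer = CRFOutputLayer(hidden_size=4, output_size=4)
#   hidden = torch.rand(4, 4, 4)           # [batch, seq_len, hidden_size]
#   best_tags = layer(hidden)              # [batch, seq_len] Viterbi decode
#   labels = torch.randint(0, 4, (4, 4))   # gold tag indices in [0, num_tags)
#   nll = layer.loss(hidden, labels)       # scalar training loss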
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr1 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr2 + 1)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp16 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + 2)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + 2)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp26 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr1 + 3)
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp33 = tl.load(in_ptr2 + 3)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
tmp36 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp12 = tmp9 + tmp11
tmp15 = tmp12 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp22 = tmp19 + tmp21
tmp25 = tmp22 + tmp24
tmp27 = tmp25 + tmp26
tmp28 = triton_helpers.maximum(tmp18, tmp27)
tmp32 = tmp29 + tmp31
tmp35 = tmp32 + tmp34
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp28, tmp37)
tmp39 = tmp8 > tmp17
tmp40 = tmp8 == tmp17
tmp41 = tmp8 != tmp8
tmp42 = tmp17 != tmp17
tmp43 = tmp41 > tmp42
tmp44 = tmp39 | tmp43
tmp45 = tmp41 & tmp42
tmp46 = tmp40 | tmp45
tmp47 = tl.full([1], 0, tl.int64)
tmp48 = tl.full([1], 1, tl.int64)
tmp49 = tmp47 < tmp48
tmp50 = tmp46 & tmp49
tmp51 = tmp44 | tmp50
tmp52 = tl.where(tmp51, tmp8, tmp17)
tmp53 = tl.where(tmp51, tmp47, tmp48)
tmp54 = tmp52 > tmp27
tmp55 = tmp52 == tmp27
tmp56 = tmp52 != tmp52
tmp57 = tmp27 != tmp27
tmp58 = tmp56 > tmp57
tmp59 = tmp54 | tmp58
tmp60 = tmp56 & tmp57
tmp61 = tmp55 | tmp60
tmp62 = tl.full([1], 2, tl.int64)
tmp63 = tmp53 < tmp62
tmp64 = tmp61 & tmp63
tmp65 = tmp59 | tmp64
tmp66 = tl.where(tmp65, tmp52, tmp27)
tmp67 = tl.where(tmp65, tmp53, tmp62)
tmp68 = tmp66 > tmp37
tmp69 = tmp66 == tmp37
tmp70 = tmp66 != tmp66
tmp71 = tmp37 != tmp37
tmp72 = tmp70 > tmp71
tmp73 = tmp68 | tmp72
tmp74 = tmp70 & tmp71
tmp75 = tmp69 | tmp74
tmp76 = tl.full([1], 3, tl.int64)
tmp77 = tmp67 < tmp76
tmp78 = tmp75 & tmp77
tmp79 = tmp73 | tmp78
tl.where(tmp79, tmp66, tmp37)
tmp81 = tl.where(tmp79, tmp67, tmp76)
tl.store(out_ptr0 + x2, tmp38, xmask)
tl.store(out_ptr1 + x2, tmp81, xmask)
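# Reading aid (not generated code): each of the kernels 0-2 fuses one
# Viterbi step -- add emission/transition scores, take max_i over the
# previous tag, and record the argmax for backtracking. The chained
# >/==/!= comparisons reproduce torch.max's NaN-propagating,
# lowest-index tie-breaking argmax without a reduction loop.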
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + x2, tmp34, xmask)
tl.store(out_ptr1 + x2, tmp77, xmask)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + x2, tmp34, xmask)
tl.store(out_ptr1 + x2, tmp77, xmask)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr3 + 1)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp33 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr2 + 2)
tmp36 = tl.broadcast_to(tmp35, [XBLOCK])
tmp39 = tl.load(in_ptr3 + 2)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp56 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp57 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp58 = tl.load(in_ptr2 + 3)
tmp59 = tl.broadcast_to(tmp58, [XBLOCK])
tmp62 = tl.load(in_ptr3 + 3)
tmp63 = tl.broadcast_to(tmp62, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp8 = tmp5 + tmp7
tmp13 = tmp10 + tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp14 + tmp16
tmp18 = tmp8 > tmp17
tmp19 = tmp8 == tmp17
tmp20 = tmp8 != tmp8
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 0, tl.int64)
tmp27 = tl.full([1], 1, tl.int64)
tmp28 = tmp26 < tmp27
tmp29 = tmp25 & tmp28
tmp30 = tmp23 | tmp29
tmp31 = tl.where(tmp30, tmp8, tmp17)
tmp32 = tl.where(tmp30, tmp26, tmp27)
tmp37 = tmp34 + tmp36
tmp38 = tmp33 + tmp37
tmp41 = tmp38 + tmp40
tmp42 = tmp31 > tmp41
tmp43 = tmp31 == tmp41
tmp44 = tmp31 != tmp31
tmp45 = tmp41 != tmp41
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp32 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp31, tmp41)
tmp55 = tl.where(tmp53, tmp32, tmp50)
tmp60 = tmp57 + tmp59
tmp61 = tmp56 + tmp60
tmp64 = tmp61 + tmp63
tmp65 = tmp54 > tmp64
tmp66 = tmp54 == tmp64
tmp67 = tmp54 != tmp54
tmp68 = tmp64 != tmp64
tmp69 = tmp67 > tmp68
tmp70 = tmp65 | tmp69
tmp71 = tmp67 & tmp68
tmp72 = tmp66 | tmp71
tmp73 = tl.full([1], 3, tl.int64)
tmp74 = tmp55 < tmp73
tmp75 = tmp72 & tmp74
tmp76 = tmp70 | tmp75
tl.where(tmp76, tmp54, tmp64)
tmp78 = tl.where(tmp76, tmp55, tmp73)
tmp79 = tl.full([XBLOCK], 4, tl.int32)
tmp80 = tmp78 + tmp79
tmp81 = tmp78 < 0
tmp82 = tl.where(tmp81, tmp80, tmp78)
tl.device_assert((0 <= tmp82) & (tmp82 < 4) | ~xmask,
'index out of bounds: 0 <= tmp82 < 4')
tmp84 = tl.load(in_ptr4 + (tmp82 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp85 = tmp84 + tmp79
tmp86 = tmp84 < 0
tmp87 = tl.where(tmp86, tmp85, tmp84)
tl.device_assert((0 <= tmp87) & (tmp87 < 4) | ~xmask,
'index out of bounds: 0 <= tmp87 < 4')
tmp89 = tl.load(in_ptr5 + (tmp87 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp90 = tmp89 + tmp79
tmp91 = tmp89 < 0
tmp92 = tl.where(tmp91, tmp90, tmp89)
tl.device_assert((0 <= tmp92) & (tmp92 < 4) | ~xmask,
'index out of bounds: 0 <= tmp92 < 4')
tmp94 = tl.load(in_ptr6 + (tmp92 + 4 * x0), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + 4 * x0, tmp78, xmask)
tl.store(out_ptr1 + 4 * x0, tmp94, xmask)
tl.store(out_ptr2 + 4 * x0, tmp89, xmask)
tl.store(out_ptr3 + 4 * x0, tmp84, xmask)
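# Reading aid (not generated code): this last kernel adds the final
# emission and stop-transition scores, takes the max over the last-step
# tags, then follows the stored argmax buffers (in_ptr4..in_ptr6)
# backwards -- the gather-based backtracking of CRF._viterbi -- writing
# the best tag for each of the 4 sequence positions.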
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4,), (1,))
assert_size_stride(arg4_1, (4, 4), (4, 1))
assert_size_stride(arg5_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg2_1, (16, 4), (4, 1), 0),
reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
del arg0_1
del arg2_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_add_max_0[grid(16)](buf0, arg1_1, arg3_1, arg4_1,
buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg3_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_1[grid(16)](buf1, buf0, arg1_1, arg4_1,
buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = buf1
del buf1
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_2[grid(16)](buf3, buf0, arg1_1, arg4_1,
buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg4_1
del buf3
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf7 = reinterpret_tensor(buf11, (4, 1), (4, 1), 3)
buf8 = reinterpret_tensor(buf11, (4, 1), (4, 1), 0)
buf9 = reinterpret_tensor(buf11, (4, 1), (4, 1), 1)
buf10 = reinterpret_tensor(buf11, (4, 1), (4, 1), 2)
triton_poi_fused_add_gather_max_3[grid(4)](buf5, buf0, arg1_1,
arg5_1, buf6, buf4, buf2, buf7, buf8, buf9, buf10, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del arg1_1
del arg5_1
del buf0
del buf2
del buf4
del buf5
del buf6
return buf11,
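# Reading aid (not generated code): call() is the compiled Viterbi
# decode of CRFOutputLayer for the fixed [4, 4, 4] input -- an mm for
# the output projection (its bias, arg1_1, is folded into the fused
# kernels), three fused max/argmax Viterbi steps, and the final
# backtracking kernel that assembles buf11.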
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(
                len(feats.shape)))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
        Computes the negative log likelihood between features and tags:
        essentially the difference between each individual sequence score
        and the partition function (the log-sum of all possible sequence
        scores).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(
                len(feats.shape)))
if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(
                len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns: Sequence score of shape [batch size]
"""
        feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(
            dim=-1)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the partition function for the CRF using the forward
        algorithm, i.e. the total score over all possible tag sequences
        for the given feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(
                self.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
Uses Viterbi algorithm to predict the best sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(
                self.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
        raise NotImplementedError('Must implement {}.loss'.format(
            self.__class__.__name__))
class CRFOutputLayerNew(OutputLayer):
"""
Implements a CRF based output layer
"""
def __init__(self, hidden_size, output_size):
super(CRFOutputLayerNew, self).__init__(hidden_size, output_size)
self.crf = CRF(output_size)
def loss(self, hidden, labels):
feats = self.output_projection(hidden)
return self.crf.loss(feats, labels)
def forward(self, input_0):
arg0_1 = self.output_projection.weight
arg1_1 = self.output_projection.bias
arg4_1 = self.crf.transitions
arg3_1 = self.crf.start_transitions
arg5_1 = self.crf.stop_transitions
arg2_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return output[0]
| yezhengli-Mr9/torchnlp | CRFOutputLayer | false | 13,151 | [
"Apache-2.0"
]
| 0 | 0f2ad6d149a413da9f03c6f6694c429746de6551 | https://github.com/yezhengli-Mr9/torchnlp/tree/0f2ad6d149a413da9f03c6f6694c429746de6551 |
CondConv2D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/x7/cx7uk52yrxaubfry3qewxwr5xsbzes7aafpe4s2nfqjwzfv2hzgi.py
# Topologically Sorted Source Nodes: [pooled_inputs], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# pooled_inputs => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/tx/ctxylwbqyqv7jxomlnzjkduvl3ty4orkqihdk3clysubo6w27rdl.py
# Topologically Sorted Source Nodes: [mul, kernels], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# kernels => sum_1
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_3, %primals_4), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [0]), kwargs = {})
triton_poi_fused_mul_sum_1 = async_compile.triton('triton_poi_fused_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp5 = tl.load(in_ptr0 + (1))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr1 + (256 + x0), xmask)
tmp11 = tl.load(in_ptr0 + (2))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp14 = tl.load(in_ptr1 + (512 + x0), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp7 = tl.sigmoid(tmp6)
tmp9 = tmp7 * tmp8
tmp10 = tmp4 + tmp9
tmp13 = tl.sigmoid(tmp12)
tmp15 = tmp13 * tmp14
tmp16 = tmp10 + tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/az/cazousalzuqn73ciahz5izvogzu4ekcsktal4tthjvwjd3cqdayz.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_4, %sum_1, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (3, 4), (4, 1))
assert_size_stride(primals_3, (3, ), (1, ))
assert_size_stride(primals_4, (3, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [pooled_inputs], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 4, 16, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((1, 3), (3, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (1, 4), (0, 1), 0), reinterpret_tensor(primals_2, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, kernels], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_1.run(buf2, primals_4, buf3, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (64, 16, 4, 1), 0), buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (1, 4, 1, 1), (4, 1, 1, 1))
buf5 = reinterpret_tensor(buf4, (1, 4, 1, 1), (4, 1, 4, 4), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 4, grid=grid(4), stream=stream0)
del primals_5
return (reinterpret_tensor(buf5, (4, 1, 1), (1, 1, 1), 0), primals_4, reinterpret_tensor(buf1, (1, 4), (4, 1), 0), buf2, buf3, reinterpret_tensor(primals_1, (1, 4, 4, 4), (64, 16, 4, 1), 0), )
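# Reading aid (not generated code): the compiled graph mirrors
# CondConv2D.forward -- mean-pool the input, addmm for the routing
# linear, a fused sigmoid-weighted sum over the 3 expert kernels,
# then one extern conv2d plus a fused bias add.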
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((3, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((3, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import functools
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
class _routing(nn.Module):
def __init__(self, in_channels, num_experts, dropout_rate):
super(_routing, self).__init__()
self.dropout = nn.Dropout(dropout_rate)
self.fc = nn.Linear(in_channels, num_experts)
def forward(self, x):
x = torch.flatten(x)
x = self.dropout(x)
x = self.fc(x)
return F.sigmoid(x)
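    # Reading aid (not original code): torch.flatten(x) collapses *all*
    # dimensions, so this routing layer handles one example's pooled
    # activations at a time; the compiled call() in the accompanying
    # triton code likewise flattens the pooled [4, 1, 1] map into a
    # single length-4 vector.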
class CondConv2D(_ConvNd):
"""Learn specialized convolutional kernels for each example.
As described in the paper
`CondConv: Conditionally Parameterized Convolutions for Efficient Inference`_ ,
    conditionally parameterized convolutions (CondConv) challenge the
    paradigm of static convolutional kernels by computing convolutional
    kernels as a function of the input.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
padding_mode (string, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
num_experts (int): Number of experts per layer
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
.. math::
H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[0] - \\text{dilation}[0]
\\times (\\text{kernel\\_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor
.. math::
W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[1] - \\text{dilation}[1]
\\times (\\text{kernel\\_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\\text{out\\_channels}, \\frac{\\text{in\\_channels}}{\\text{groups}},`
:math:`\\text{kernel\\_size[0]}, \\text{kernel\\_size[1]})`.
The values of these weights are sampled from
:math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
then the values of these weights are
sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`
.. _CondConv: Conditionally Parameterized Convolutions for Efficient Inference:
https://arxiv.org/abs/1904.04971
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
num_experts=3, dropout_rate=0.2):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(CondConv2D, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, False, _pair(0), groups,
bias, padding_mode)
self._avg_pooling = functools.partial(F.adaptive_avg_pool2d,
output_size=(1, 1))
self._routing_fn = _routing(in_channels, num_experts, dropout_rate)
self.weight = Parameter(torch.Tensor(num_experts, out_channels,
in_channels // groups, *kernel_size))
self.reset_parameters()
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._padding_repeated_twice, mode
=self.padding_mode), weight, self.bias, self.stride, _pair(
0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def forward(self, input):
pooled_inputs = self._avg_pooling(input)
routing_weights = self._routing_fn(pooled_inputs)
kernels = torch.sum(routing_weights[:, None, None, None, None] *
self.weight, 0)
return self._conv_forward(input, kernels)
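def _condconv_example():
    # Hedged usage sketch (not in the original repo), matching the toy
    # shapes from get_inputs()/get_init_inputs() below; relies on
    # F.conv2d accepting an unbatched [C, H, W] input (PyTorch >= 1.12).
    conv = CondConv2D(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(4, 4, 4)  # one [C, H, W] example
    return conv(x)           # [4, 1, 1] output map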
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import functools
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr0 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr1 + (256 + x0), xmask)
tmp11 = tl.load(in_ptr0 + 2)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp14 = tl.load(in_ptr1 + (512 + x0), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp7 = tl.sigmoid(tmp6)
tmp9 = tmp7 * tmp8
tmp10 = tmp4 + tmp9
tmp13 = tl.sigmoid(tmp12)
tmp15 = tmp13 * tmp14
tmp16 = tmp10 + tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
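# Reading aid (not generated code): this kernel evaluates
# sum_e sigmoid(routing[e]) * weight[e] over the 3 experts, fusing the
# routing sigmoid with the weighted sum that builds the per-input
# convolution kernel consumed by the extern conv2d below.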
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (3, 4), (4, 1))
assert_size_stride(primals_3, (3,), (1,))
assert_size_stride(primals_4, (3, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(4)](buf1, primals_1, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((1, 3), (3, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (1, 4), (0,
1), 0), reinterpret_tensor(primals_2, (4, 3), (1, 4), 0), alpha
=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_1[grid(256)](buf2, primals_4, buf3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (64, 16, 4, 1), 0), buf3, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf4, (1, 4, 1, 1), (4, 1, 1, 1))
buf5 = reinterpret_tensor(buf4, (1, 4, 1, 1), (4, 1, 4, 4), 0)
del buf4
triton_poi_fused_convolution_2[grid(4)](buf5, primals_5, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del primals_5
return reinterpret_tensor(buf5, (4, 1, 1), (1, 1, 1), 0
), primals_4, reinterpret_tensor(buf1, (1, 4), (4, 1), 0
), buf2, buf3, reinterpret_tensor(primals_1, (1, 4, 4, 4), (64, 16,
4, 1), 0)
class _routing(nn.Module):
def __init__(self, in_channels, num_experts, dropout_rate):
super(_routing, self).__init__()
self.dropout = nn.Dropout(dropout_rate)
self.fc = nn.Linear(in_channels, num_experts)
def forward(self, x):
x = torch.flatten(x)
x = self.dropout(x)
x = self.fc(x)
return F.sigmoid(x)
class CondConv2DNew(_ConvNd):
"""Learn specialized convolutional kernels for each example.
As described in the paper
`CondConv: Conditionally Parameterized Convolutions for Efficient Inference`_ ,
    conditionally parameterized convolutions (CondConv) challenge the
    paradigm of static convolutional kernels by computing convolutional
    kernels as a function of the input.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
padding_mode (string, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
num_experts (int): Number of experts per layer
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
.. math::
H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[0] - \\text{dilation}[0]
\\times (\\text{kernel\\_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor
.. math::
W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[1] - \\text{dilation}[1]
\\times (\\text{kernel\\_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\\text{out\\_channels}, \\frac{\\text{in\\_channels}}{\\text{groups}},`
:math:`\\text{kernel\\_size[0]}, \\text{kernel\\_size[1]})`.
The values of these weights are sampled from
:math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
then the values of these weights are
sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`
.. _CondConv: Conditionally Parameterized Convolutions for Efficient Inference:
https://arxiv.org/abs/1904.04971
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
num_experts=3, dropout_rate=0.2):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(CondConv2DNew, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, False, _pair(0), groups,
bias, padding_mode)
self._avg_pooling = functools.partial(F.adaptive_avg_pool2d,
output_size=(1, 1))
self._routing_fn = _routing(in_channels, num_experts, dropout_rate)
self.weight = Parameter(torch.Tensor(num_experts, out_channels,
in_channels // groups, *kernel_size))
self.reset_parameters()
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._padding_repeated_twice, mode
=self.padding_mode), weight, self.bias, self.stride, _pair(
0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def forward(self, input_0):
primals_4 = self.weight
primals_5 = self.bias
primals_2 = self._routing_fn.fc.weight
primals_3 = self._routing_fn.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| yifanpu001/CondConv-pytorch | CondConv2D | false | 13,152 | [
"MIT"
]
| 0 | d5198f1c53de97304f8a23f4ca287cf5b4d33561 | https://github.com/yifanpu001/CondConv-pytorch/tree/d5198f1c53de97304f8a23f4ca287cf5b4d33561 |
Router | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/y4/cy4uizb45dwhrr7twvytpzu4aq7la7f5hnc6osfs347fqt2aecxk.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_2 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 1.0
tmp5 = tmp3 / tmp4
tmp6 = tmp5 / tmp4
tmp7 = tl.sigmoid(tmp6)
tl.store(in_out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
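# Reading aid (not generated code): the two divisions by 1.0 above fold
# the x.mean(dim=-1).mean(dim=-1) reductions into the bias add and
# sigmoid -- trivial here because the 4x4 conv of a 4x4 input leaves a
# 1x1 spatial map.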
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 1, 1), (1, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, ), (1, ), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_2, 4, grid=grid(4), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import warnings
import torch.nn as nn
class Router(nn.Module):
"""Convolution + Relu + Global Average Pooling + Sigmoid"""
def __init__(self, input_nc, input_width, input_height, kernel_size=28,
soft_decision=True, stochastic=False, **kwargs):
super(Router, self).__init__()
self.soft_decision = soft_decision
self.stochastic = stochastic
if max(input_width, input_height) < kernel_size:
warnings.warn('Router kernel too large, shrink it')
kernel_size = max(input_width, input_height)
self.conv1 = nn.Conv2d(input_nc, 1, kernel_size=kernel_size)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.conv1(x)
x = x.mean(dim=-1).mean(dim=-1).squeeze()
x = self.output_controller(x)
return x
def output_controller(self, x):
if self.soft_decision:
return self.sigmoid(x)
if self.stochastic:
x = self.sigmoid(x)
return ops.ST_StochasticIndicator()(x)
else:
x = self.sigmoid(x)
return ops.ST_Indicator()(x)
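    # Reading aid (not original code): 'ops' above refers to the repo's
    # module providing ST_Indicator / ST_StochasticIndicator (presumably
    # straight-through estimators); it is not imported in this snippet,
    # so only the default soft_decision=True path is runnable as-is.
def _router_example():
    # Hedged usage sketch (not in the original repo), matching the toy
    # shapes from get_inputs()/get_init_inputs() below.
    router = Router(input_nc=4, input_width=4, input_height=4)
    x = torch.rand(4, 4, 4, 4)
    return router(x)  # soft routing probabilities, shape [4]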
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import warnings
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 1.0
tmp5 = tmp3 / tmp4
tmp6 = tmp5 / tmp4
tmp7 = tl.sigmoid(tmp6)
tl.store(in_out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 1, 1), (1, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4,), (1,), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(4)](buf1, primals_2, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf1
class RouterNew(nn.Module):
"""Convolution + Relu + Global Average Pooling + Sigmoid"""
def __init__(self, input_nc, input_width, input_height, kernel_size=28,
soft_decision=True, stochastic=False, **kwargs):
super(RouterNew, self).__init__()
self.soft_decision = soft_decision
self.stochastic = stochastic
if max(input_width, input_height) < kernel_size:
warnings.warn('Router kernel too large, shrink it')
kernel_size = max(input_width, input_height)
self.conv1 = nn.Conv2d(input_nc, 1, kernel_size=kernel_size)
self.sigmoid = nn.Sigmoid()
def output_controller(self, x):
if self.soft_decision:
return self.sigmoid(x)
if self.stochastic:
x = self.sigmoid(x)
return ops.ST_StochasticIndicator()(x)
else:
x = self.sigmoid(x)
return ops.ST_Indicator()(x)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| yulinfeng000/AdaptiveNeuralTrees | Router | false | 13,153 | [
"MIT"
]
| 0 | bbcb381b9cb0c91ae1af33ce43b43f352055041c | https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c |
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
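# The two kernels above implement a numerically stable softmax over each
# 4-element row in two pointwise passes: triton_poi_fused__softmax_0 writes
# exp(x - rowmax), and triton_poi_fused__softmax_1 divides each of those
# exponentials by their row sum.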
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [context], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
"""Scaled dot-product attention mechanism."""
def __init__(self, attention_dropout=0.0):
"""Init.
Args:
attention_dropout: A scalar, dropout rate.
"""
super(ScaledDotProductAttention, self).__init__()
self.dropout = nn.Dropout(attention_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, scale=None, attn_mask=None):
"""Forward pass.
Args:
q: Queries tensor, with shape of [B, L_q, D_q]
k: Keys tensor, with shape of [B, L_k, D_k]
v: Values tensor, with shape of [B, L_v, D_v]
scale: A scalar, scale factor.
attn_mask: A binary masking tensor, with shape of [B, L_q, L_k]
Returns:
Context and attention tensor.
"""
attention = torch.bmm(q, k.transpose(1, 2))
if scale:
attention = attention * scale
        if attn_mask is not None:
attention = attention.masked_fill_(attn_mask, -np.inf)
attention = self.softmax(attention)
attention = self.dropout(attention)
context = torch.bmm(attention, v)
return context, attention
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
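# Minimal usage sketch for ScaledDotProductAttention; the batch/length/dim
# sizes and the conventional 1/sqrt(D) scale below are illustrative
# assumptions, not values taken from the source repo.
if __name__ == "__main__":
    q = torch.rand(2, 5, 8)
    k = torch.rand(2, 6, 8)
    v = torch.rand(2, 6, 8)
    attn = ScaledDotProductAttention(attention_dropout=0.0)
    context, attention = attn(q, k, v, scale=8 ** -0.5)
    assert context.shape == (2, 5, 8)   # one context vector per query
    assert attention.shape == (2, 5, 6)  # query-to-key attention weights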
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
class ScaledDotProductAttentionNew(nn.Module):
"""Scaled dot-product attention mechanism."""
def __init__(self, attention_dropout=0.0):
"""Init.
Args:
attention_dropout: A scalar, dropout rate.
"""
super(ScaledDotProductAttentionNew, self).__init__()
self.dropout = nn.Dropout(attention_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
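# Note: the compiled graph covers only the bmm -> softmax -> bmm pipeline;
# because attention_dropout defaults to 0.0 the dropout is an identity op and
# does not appear in the traced kernels, and no scale/mask inputs were traced.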
 | yumoh/pinyin2hanzi | ScaledDotProductAttention | false | 13,154 | ["MIT"] | 0 | 1cbb650d3dd3ec0a0f51be5822556634860ad612 | https://github.com/yumoh/pinyin2hanzi/tree/1cbb650d3dd3ec0a0f51be5822556634860ad612 |
LR | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ks/ckstvgcataqezlvec7fcskhrnbei33wnpp5xa5c6phrhsesyigwu.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_0 = async_compile.triton('triton_per_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
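# The kernel above fuses the full log-softmax into a single persistent
# reduction per row: subtract the row max, exponentiate, sum, take the log,
# and subtract, i.e. log_softmax(x) = (x - max) - log(sum(exp(x - max))).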
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (10, 64), (64, 1))
assert_size_stride(primals_3, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_per_fused__log_softmax_0.run(buf0, buf3, 4, 10, grid=grid(4), stream=stream0)
del buf0
return (buf3, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LR(nn.Module):
""" Logistinc regression
"""
def __init__(self, input_nc, input_width, input_height, no_classes=10,
**kwargs):
super(LR, self).__init__()
self.fc = nn.Linear(input_nc * input_width * input_height, no_classes)
def forward(self, x):
x = x.view(x.size(0), -1)
        return F.log_softmax(self.fc(x), dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
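# Minimal usage sketch for LR, assuming MNIST-like 1x28x28 inputs; these
# sizes are illustrative and not taken from the source repo.
if __name__ == "__main__":
    model = LR(input_nc=1, input_width=28, input_height=28, no_classes=10)
    log_probs = model(torch.rand(4, 1, 28, 28))
    assert log_probs.shape == (4, 10)  # one log-probability row per sample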
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__log_softmax_0(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (10, 64), (64, 1))
assert_size_stride(primals_3, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4,
64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 10), (1,
64), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__log_softmax_0[grid(4)](buf0, buf3, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf0
return buf3, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf3
class LRNew(nn.Module):
""" Logistinc regression
"""
def __init__(self, input_nc, input_width, input_height, no_classes=10,
**kwargs):
super(LRNew, self).__init__()
self.fc = nn.Linear(input_nc * input_width * input_height, no_classes)
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
 | yulinfeng000/AdaptiveNeuralTrees | LR | false | 13,155 | ["MIT"] | 0 | bbcb381b9cb0c91ae1af33ce43b43f352055041c | https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c |
MySigmoidFocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ix/cixrcqjs3mmxfrmb4ar6p2ap7t2kmg24knop5yns67fddlpjwuz7.py
# Topologically Sorted Source Nodes: [eq, float_1, neg, sub, pow_1, add, log, term1, mul_2, mul_3, ne, ge, mul_4, float_2, pow_2, sub_1, add_1, log_1, term2, mul_5, mul_6, loss, sum_1, truediv], Original ATen: [aten.eq, aten._to_copy, aten.neg, aten.rsub, aten.pow, aten.add, aten.log, aten.mul, aten.ne, aten.ge, aten.sub, aten.sum, aten.div]
# Source node to ATen node mapping:
# add => add_1
# add_1 => add_2
# eq => eq
# float_1 => convert_element_type_1
# float_2 => convert_element_type_2
# ge => ge
# log => log
# log_1 => log_1
# loss => sub_2
# mul_2 => mul_3
# mul_3 => mul_4
# mul_4 => mul_5
# mul_5 => mul_6
# mul_6 => mul_7
# ne => ne
# neg => neg
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# term1 => mul_1
# term2 => mul_2
# truediv => div
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_1, %unsqueeze), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%convert_element_type_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-07), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %log), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %mul_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, 4), kwargs = {})
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Tensor](args = (%unsqueeze_1, %unsqueeze), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%unsqueeze_1, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%ne, %ge), kwargs = {})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.float32), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 4), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-07), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %log_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, %mul_2), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, -3), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, %mul_7), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {})
triton_per_fused__to_copy_add_div_eq_ge_log_mul_ne_neg_pow_rsub_sub_sum_0 = async_compile.triton('triton_per_fused__to_copy_add_div_eq_ge_log_mul_ne_neg_pow_rsub_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_eq_ge_log_mul_ne_neg_pow_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_div_eq_ge_log_mul_ne_neg_pow_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = (rindex // 256)
r5 = rindex % 64
r0 = rindex % 4
r7 = rindex % 256
r4 = rindex
tmp0 = tl.load(in_ptr0 + (r5 + (64*r3)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (r7), None, eviction_policy='evict_last')
tmp1 = 1 + r0
tmp2 = tmp1.to(tl.float32)
tmp3 = tmp0 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = -tmp4
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = 1e-07
tmp12 = tmp6 + tmp11
tmp13 = tl_math.log(tmp12)
tmp14 = tmp10 * tmp13
tmp15 = tmp5 * tmp14
tmp16 = 4.0
tmp17 = tmp15 * tmp16
tmp18 = tmp0 != tmp2
tmp19 = 0.0
tmp20 = tmp0 >= tmp19
tmp21 = tmp18 & tmp20
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp6 * tmp6
tmp24 = tmp23 * tmp23
tmp25 = tmp8 + tmp11
tmp26 = tl_math.log(tmp25)
tmp27 = tmp24 * tmp26
tmp28 = tmp22 * tmp27
tmp29 = -3.0
tmp30 = tmp28 * tmp29
tmp31 = tmp17 - tmp30
tmp32 = tl.broadcast_to(tmp31, [RBLOCK])
tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0))
tmp35 = 0.25
tmp36 = tmp34 * tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp36, None)
''', device_str='cuda')
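# The kernel above reduces the focal loss over the whole 4x4x4x4 input in one
# pass, with gamma=4 and alpha=4 baked in as constants: (1 - p)**4 and p**4
# are computed by repeated squaring, the 4.0 multiplier is alpha, -3.0 is
# (1 - alpha), and the final 0.25 factor is the division by batch size 4.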
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [eq, float_1, neg, sub, pow_1, add, log, term1, mul_2, mul_3, ne, ge, mul_4, float_2, pow_2, sub_1, add_1, log_1, term2, mul_5, mul_6, loss, sum_1, truediv], Original ATen: [aten.eq, aten._to_copy, aten.neg, aten.rsub, aten.pow, aten.add, aten.log, aten.mul, aten.ne, aten.ge, aten.sub, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_add_div_eq_ge_log_mul_ne_neg_pow_rsub_sub_sum_0.run(buf2, arg1_1, arg0_1, 1, 1024, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
class MySigmoidFocalLoss(nn.Module):
def __init__(self, gamma, alpha):
super().__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, confids, targets):
bias = 1e-07
num_classes = confids.shape[1]
dtype = targets.dtype
device = targets.device
gamma = self.gamma
alpha = self.alpha
class_range = torch.arange(1, num_classes + 1, dtype=dtype, device=
device).unsqueeze(0)
t = targets.unsqueeze(1)
p = confids
term1 = (1 - p) ** gamma * torch.log(p + bias)
term2 = p ** gamma * torch.log(1 - p + bias)
loss = -(t == class_range).float() * term1 * alpha - ((t !=
class_range) * (t >= 0)).float() * term2 * (1 - alpha)
return loss.sum() / confids.shape[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'gamma': 4, 'alpha': 4}]
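# Minimal usage sketch for MySigmoidFocalLoss using the RetinaNet-style
# hyperparameters gamma=2, alpha=0.25; the gamma=4, alpha=4 pair above comes
# from the synthetic init inputs, and the shapes below are illustrative.
if __name__ == "__main__":
    criterion = MySigmoidFocalLoss(gamma=2, alpha=0.25)
    confids = torch.rand(8, 80)  # per-class probabilities in [0, 1)
    targets = torch.randint(0, 81, (8,)).float()  # 0 denotes background
    loss = criterion(confids, targets)
    assert loss.dim() == 0  # scalar loss, summed then divided by batch size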
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_add_div_eq_ge_log_mul_ne_neg_pow_rsub_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex // 256
r5 = rindex % 64
r0 = rindex % 4
r7 = rindex % 256
tmp0 = tl.load(in_ptr0 + (r5 + 64 * r3), None, eviction_policy='evict_last'
)
tmp6 = tl.load(in_ptr1 + r7, None, eviction_policy='evict_last')
tmp1 = 1 + r0
tmp2 = tmp1.to(tl.float32)
tmp3 = tmp0 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = -tmp4
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = 1e-07
tmp12 = tmp6 + tmp11
tmp13 = tl_math.log(tmp12)
tmp14 = tmp10 * tmp13
tmp15 = tmp5 * tmp14
tmp16 = 4.0
tmp17 = tmp15 * tmp16
tmp18 = tmp0 != tmp2
tmp19 = 0.0
tmp20 = tmp0 >= tmp19
tmp21 = tmp18 & tmp20
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp6 * tmp6
tmp24 = tmp23 * tmp23
tmp25 = tmp8 + tmp11
tmp26 = tl_math.log(tmp25)
tmp27 = tmp24 * tmp26
tmp28 = tmp22 * tmp27
tmp29 = -3.0
tmp30 = tmp28 * tmp29
tmp31 = tmp17 - tmp30
tmp32 = tl.broadcast_to(tmp31, [RBLOCK])
tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0))
tmp35 = 0.25
tmp36 = tmp34 * tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp36, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused__to_copy_add_div_eq_ge_log_mul_ne_neg_pow_rsub_sub_sum_0[
grid(1)](buf2, arg1_1, arg0_1, 1, 1024, num_warps=8, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class MySigmoidFocalLossNew(nn.Module):
def __init__(self, gamma, alpha):
super().__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
 | yuruiqi/FCOS | MySigmoidFocalLoss | false | 13,156 | ["BSD-2-Clause"] | 0 | f03f984a03f4e23a0c1c8b470e401d4319e56c3f | https://github.com/yuruiqi/FCOS/tree/f03f984a03f4e23a0c1c8b470e401d4319e56c3f |
SeqAttnMatch | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/sb/csbkytvktsi5xvyifabpordrodcrbhhsdje4rbbt3znhexh2yg7g.py
# Topologically Sorted Source Nodes: [x_proj_1, y_proj_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_proj_1 => relu
# y_proj_1 => relu_1
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = 0.0
tmp9 = tmp7 <= tmp8
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(in_out_ptr1 + (x2), tmp7, xmask)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/hi/chigpju5u7npbqasf2uh6cvmdstmbueidkhexxlp3c46yawgzduu.py
# Topologically Sorted Source Nodes: [alpha_flat], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alpha_flat => amax, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*(x0 // 4)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*(x0 // 4))), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*(x0 // 4))), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*(x0 // 4))), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp4 = float("-inf")
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 > tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 > tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 > tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + (x0), tmp20, xmask)
tl.store(out_ptr1 + (x0), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5n/c5nmm2m6x64ohq6fipccy7flvlmvt52jdvfnnmat7k7l73gqfze7.py
# Topologically Sorted Source Nodes: [alpha_flat], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# alpha_flat => amax, div, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 // 4))), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp4 = float("-inf")
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
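# Kernels 1 and 2 above fuse the padding mask into the softmax: wherever
# y_mask > 0 the score is replaced with -inf before the row max/sum are taken
# (kernel 1) and again before the final exp/divide (kernel 2), so padded key
# positions receive zero attention weight.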
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf2)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0); del buf2 # reuse
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_proj_1, y_proj_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, buf3, primals_3, buf9, 64, grid=grid(64), stream=stream0)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.bmm]
extern_kernels.bmm(buf1, reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
buf6 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
# Topologically Sorted Source Nodes: [alpha_flat], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(primals_5, buf4, buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [alpha_flat], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf7, primals_5, buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf5
del buf6
del primals_5
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matched_seq], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0), primals_4, out=buf8)
return (buf8, primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf1, buf7, buf3, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SeqAttnMatch(nn.Module):
"""Given sequences X and Y, match sequence Y to each element in X.
* o_i = sum(alpha_j * y_j) for i in X
* alpha_j = softmax(y_j * x_i)
"""
def __init__(self, input_size, identity=False):
super(SeqAttnMatch, self).__init__()
if not identity:
self.linear = nn.Linear(input_size, input_size)
else:
self.linear = None
def forward(self, x, y, y_mask):
"""
Args:
x: batch * len1 * hdim
y: batch * len2 * hdim
y_mask: batch * len2 (1 for padding, 0 for true)
Output:
matched_seq: batch * len1 * hdim
"""
if self.linear:
x_proj = self.linear(x.view(-1, x.size(2))).view(x.size())
x_proj = F.relu(x_proj)
y_proj = self.linear(y.view(-1, y.size(2))).view(y.size())
y_proj = F.relu(y_proj)
else:
x_proj = x
y_proj = y
scores = x_proj.bmm(y_proj.transpose(2, 1))
y_mask = y_mask.unsqueeze(1).expand(scores.size())
scores.data.masked_fill_(y_mask.data > 0, -float('inf'))
alpha_flat = F.softmax(scores.view(-1, y.size(1)), dim=-1)
alpha = alpha_flat.view(-1, x.size(1), y.size(1))
matched_seq = alpha.bmm(y)
return matched_seq
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
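# Minimal usage sketch for SeqAttnMatch with illustrative sizes; per the
# docstring, y_mask marks padded key positions with 1 and real ones with 0.
if __name__ == "__main__":
    match = SeqAttnMatch(input_size=8)
    x = torch.rand(2, 5, 8)
    y = torch.rand(2, 6, 8)
    y_mask = torch.zeros(2, 6)
    y_mask[:, 4:] = 1  # last two key positions are padding
    matched = match(x, y, y_mask)
    assert matched.shape == (2, 5, 8)  # one matched vector per element of x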
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_out_ptr1,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = 0.0
tmp9 = tmp7 <= tmp8
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(in_out_ptr1 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * (x0 // 4), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 > tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 > tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 > tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr1 + x0, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4)), xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf2)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf3,
primals_3, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf1, reinterpret_tensor(buf3, (4, 4, 4), (16, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
buf6 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
triton_poi_fused__softmax_1[grid(16)](primals_5, buf4, buf5, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(64)](buf7, primals_5, buf5, buf6,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
del buf6
del primals_5
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1),
0), primals_4, out=buf8)
return buf8, primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf1, buf7, buf3, buf9
class SeqAttnMatchNew(nn.Module):
"""Given sequences X and Y, match sequence Y to each element in X.
* o_i = sum(alpha_j * y_j) for i in X
* alpha_j = softmax(y_j * x_i)
"""
def __init__(self, input_size, identity=False):
super(SeqAttnMatchNew, self).__init__()
if not identity:
self.linear = nn.Linear(input_size, input_size)
else:
self.linear = None
def forward(self, input_0, input_1, input_2):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
primals_4 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
 | ys7yoo/DrQAKor | SeqAttnMatch | false | 13,157 | ["BSD-3-Clause"] | 0 | ed9a69dd2a95f8ccb81bd5d6db0fbd59aae0be50 | https://github.com/ys7yoo/DrQAKor/tree/ed9a69dd2a95f8ccb81bd5d6db0fbd59aae0be50 |
FCN_mse | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ns/cns2e6t63to7326mqrrbrukx4bu4zf2kd446tymg6aua6u7rlxyr.py
# Topologically Sorted Source Nodes: [conv2d, c1], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# c1 => tanh
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_tanh_0 = async_compile.triton('triton_poi_fused_convolution_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
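# A reference sketch (not part of the generated graph) of what the fused
# kernel above computes: a per-channel bias add followed by tanh on an NCHW
# activation, i.e. torch.tanh(conv(x) + b).
def _reference_conv_tanh(conv_out, bias):
    return torch.tanh(conv_out + bias.view(1, -1, 1, 1))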
# kernel path: runs/run_shard_9/inductor_cache/kn/cknhawak2wz4gmvrxpck5jlj53rvfixdjpgeiubpzgzphrpxgsh4.py
# Topologically Sorted Source Nodes: [conv2d_1, c2], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# c2 => tanh_1
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%tanh, %primals_4, %primals_5, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_tanh_1 = async_compile.triton('triton_poi_fused_convolution_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5p/c5p5qppzecxcvl2fokkcfo32s4cs6s4z3s2aeii7lx2jg75iaktc.py
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# score => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%tanh_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (1, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, c1], Original ATen: [aten.convolution, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_tanh_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, c2], Original ATen: [aten.convolution, aten.tanh]
triton_poi_fused_convolution_tanh_1.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_7, 16384, grid=grid(16384), stream=stream0)
del primals_7
return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, )
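# Note: besides the score map buf5, call() also returns the conv weights and
# the intermediate activations buf1/buf3; AOTAutograd saves these for the
# backward pass of the fused convolution+tanh graph.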
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 16, 5, 5), (400, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FCN_mse(nn.Module):
"""
Predict whether pixels are part of the object or the background.
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, padding=2)
self.classifier = nn.Conv2d(32, 1, kernel_size=1)
def forward(self, x):
c1 = torch.tanh(self.conv1(x))
c2 = torch.tanh(self.conv2(c1))
score = self.classifier(c2)
return score
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
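if __name__ == '__main__':
    # A minimal usage sketch: score carries one logit per pixel, so a
    # sigmoid over it yields a soft foreground/background mask.
    model = FCN_mse()
    score = model(torch.rand(4, 3, 64, 64))  # -> (4, 1, 64, 64)
    mask = torch.sigmoid(score)
    print(score.shape, mask.min().item(), mask.max().item())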
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_tanh_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (1, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_tanh_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_tanh_1[grid(524288)](buf3, primals_5,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16384)](buf5, primals_7, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3
class FCN_mseNew(nn.Module):
"""
Predict whether pixels are part of the object or the background.
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, padding=2)
self.classifier = nn.Conv2d(32, 1, kernel_size=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.classifier.weight
primals_7 = self.classifier.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
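if __name__ == '__main__':
    # A minimal smoke-test sketch (CUDA required, since call() pins device
    # 0): the compiled wrapper should reproduce the eager FCN_mse module
    # this file was generated from, up to floating-point tolerance.
    compiled = FCN_mseNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    print(compiled(x).shape)  # -> torch.Size([4, 1, 64, 64])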
| yuishihara/chainer-causal-info-gan | FCN_mse | false | 13,158 | [
"MIT"
]
| 0 | 67ff8e66fb1f8762e6c7830be80730395d2eb22c | https://github.com/yuishihara/chainer-causal-info-gan/tree/67ff8e66fb1f8762e6c7830be80730395d2eb22c |
Solver_GAP_OneFClayers | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/h3/ch3l34kqqlue6keu2k5zyuitwqh5ph3eypbf57e4juzyb5tqpeva.py
# Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# mean_1 => mean_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [-1]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + (x0), tmp36, xmask)
''', device_str='cuda')
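# A reference sketch (not part of the generated graph) of the reduction the
# kernel above unrolls: a mean over the last two (size-4) dimensions, as in
# x.mean(dim=-1).mean(dim=-1) for a (4, 4, 4, 4) input.
def _reference_spatial_mean(x):
    return x.mean(dim=-1).mean(dim=-1)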
# kernel path: runs/run_shard_9/inductor_cache/o5/co5xgxhjhlo7bknaeay7cl5wcvnoj7iz2upia7zbzhelekrx6iym.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_1 = async_compile.triton('triton_per_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
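# A reference sketch (not part of the generated graph) of the numerically
# stable log-softmax computed above: subtract the row max before
# exponentiating, then subtract the log of the summed exponentials.
def _reference_log_softmax(x):
    shifted = x - x.max(dim=-1, keepdim=True).values
    return shifted - shifted.exp().sum(dim=-1, keepdim=True).log()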
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (10, 4), (4, 1))
assert_size_stride(primals_3, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, buf0, reinterpret_tensor(primals_2, (4, 10), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_1.run(buf1, buf4, 4, 10, grid=grid(4), stream=stream0)
del buf1
return (buf4, buf0, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Solver_GAP_OneFClayers(nn.Module):
""" GAP + fc1 """
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, reduction_rate=2, **kwargs):
super(Solver_GAP_OneFClayers, self).__init__()
self.dropout_prob = dropout_prob
self.reduction_rate = reduction_rate
self.fc1 = nn.Linear(input_nc, 10)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = x.mean(dim=-1).mean(dim=-1).squeeze()
x = F.dropout(x, training=self.training, p=self.dropout_prob)
x = self.fc1(x)
return F.log_softmax(x, dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
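if __name__ == '__main__':
    # A minimal usage sketch: GAP collapses the (4, 4) spatial grid, fc1
    # maps the 4 channels to 10 classes, and log_softmax normalises them.
    solver = Solver_GAP_OneFClayers(input_nc=4, input_width=4, input_height=4)
    logp = solver(torch.rand(4, 4, 4, 4))  # -> (4, 10)
    print(logp.exp().sum(dim=-1))  # each row sums to ~1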
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x0, tmp36, xmask)
@triton.jit
def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (10, 4), (4, 1))
assert_size_stride(primals_3, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_3, buf0, reinterpret_tensor(primals_2,
(4, 10), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        triton_per_fused__log_softmax_1[grid(4)](buf1, buf4, 4, 10,
            XBLOCK=1, num_warps=2, num_stages=1)
del buf1
return buf4, buf0, buf4
class Solver_GAP_OneFClayersNew(nn.Module):
""" GAP + fc1 """
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, reduction_rate=2, **kwargs):
super(Solver_GAP_OneFClayersNew, self).__init__()
self.dropout_prob = dropout_prob
self.reduction_rate = reduction_rate
self.fc1 = nn.Linear(input_nc, 10)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
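if __name__ == '__main__':
    # A minimal usage sketch (CUDA required, since call() targets device 0):
    # the output is log-probabilities over 10 classes, so .exp() recovers
    # rows that sum to 1.
    solver = Solver_GAP_OneFClayersNew(input_nc=4, input_width=4,
        input_height=4).cuda()
    logp = solver(torch.rand(4, 4, 4, 4, device='cuda'))
    print(logp.shape, logp.exp().sum(dim=-1))  # -> (4, 10), rows ~ 1.0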
| yulinfeng000/AdaptiveNeuralTrees | Solver_GAP_OneFClayers | false | 13,159 | [
"MIT"
]
| 0 | bbcb381b9cb0c91ae1af33ce43b43f352055041c | https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c |
Conv_ReLU_Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/xn/cxne6bt4wc2qhibbknksztk5an7o2oh4tm72xeans5ostxj5zat3.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ph/cph55ophfk7c5ez5brjds2nfid4clntt6fflr5f3stuvpvtee6qk.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_1, %relu_2], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 64
x0 = xindex % 16
x2 = (xindex // 1024)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (512*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 64, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-32) + x1)) + (512*x2)), tmp6, other=0.0)
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
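# A reference sketch (not part of the generated graph) of the fusion above:
# channel concatenation where only the second operand still needs its ReLU,
# i.e. torch.cat((a, torch.relu(b)), dim=1) on (4, 32, 4, 4) halves.
def _reference_cat_relu(a, b):
    return torch.cat((a, torch.relu(b)), dim=1)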
# kernel path: runs/run_shard_9/inductor_cache/cp/ccp53as4jo44f3vzopfmnv4xb3h6lqsjbrt7tefbo2h54f4onm6v.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_1
# Graph fragment:
# %cat_1 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %relu_3], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 96
x0 = xindex % 16
x2 = (xindex // 1536)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (1024*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 96, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-64) + x1)) + (512*x2)), tmp6, other=0.0)
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/tp/ctpzjndfjfqp73vsjs7spqibccbnyyxf4qidxef7zradsgeab4tv.py
# Topologically Sorted Source Nodes: [out_0, cat_3, out], Original ATen: [aten.relu, aten.cat, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# cat_3 => cat_2
# out => add
# out_0 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_1, %relu_4], 1), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %cat_2), kwargs = {})
# %le_5 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_cat_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_add_cat_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_relu_threshold_backward_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 128
x0 = xindex % 16
x2 = (xindex // 2048)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = x1
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 96, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tl.load(in_ptr1 + (x0 + (16*x1) + (1536*x2)), tmp7, other=0.0)
tmp9 = tmp3 >= tmp6
tmp10 = tl.full([1], 128, tl.int64)
tmp11 = tmp3 < tmp10
tmp12 = tl.load(in_ptr2 + (x0 + (16*((-96) + x1)) + (512*x2)), tmp9, other=0.0)
tmp13 = triton_helpers.maximum(tmp1, tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp7, tmp8, tmp15)
tmp17 = tmp2 + tmp16
tmp18 = 0.0
tmp19 = tmp2 <= tmp18
tl.store(out_ptr0 + (x3), tmp17, None)
tl.store(out_ptr1 + (x3), tmp19, None)
''', device_str='cuda')
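# A reference sketch (not part of the generated graph) of the fusion above:
# relu(out_0) + cat(cat_2, relu(out_4)), with the boolean output recording
# where relu(out_0) <= 0 for the backward threshold.
def _reference_residual_add(out_0, cat_2, out_4):
    shortcut = torch.relu(out_0)
    out = shortcut + torch.cat((cat_2, torch.relu(out_4)), dim=1)
    return out, shortcut <= 0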
# kernel path: runs/run_shard_9/inductor_cache/r7/cr7d5h2rstveymc34ztijr4pljmqudb3um5mqrt5m2cnxvu7h52b.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_5 => relu_5
# Graph fragment:
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, None)
tl.store(out_ptr0 + (x0), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/vy/cvyiy6v43vyvmziibmmgs54lvmeuks7ej2jtmqgmra45njvcyyn3.py
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_4 => relu_4
# Graph fragment:
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + (x0), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_6, (32, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_7, (64, 128, 1, 1), (128, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 128, 4, 4), (2048, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 4, 4), (512, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf2, 2048, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf2, buf3, buf4, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 4, 4), (512, 16, 4, 1))
buf6 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf4, buf5, buf6, 6144, grid=grid(6144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 32, 4, 4), (512, 16, 4, 1))
buf8 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_0, cat_3, out], Original ATen: [aten.relu, aten.cat, aten.add, aten.threshold_backward]
triton_poi_fused_add_cat_relu_threshold_backward_3.run(buf0, buf6, buf7, buf8, buf15, 8192, grid=grid(8192), stream=stream0)
del buf0
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 4, 4), (1024, 16, 4, 1))
buf10 = buf9; del buf9 # reuse
buf11 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_4.run(buf10, buf11, 4096, grid=grid(4096), stream=stream0)
buf12 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf7, buf12, 2048, grid=grid(2048), stream=stream0)
del buf7
buf13 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf5, buf13, 2048, grid=grid(2048), stream=stream0)
del buf5
buf14 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf3, buf14, 2048, grid=grid(2048), stream=stream0)
del buf3
return (buf10, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, buf2, buf4, buf6, buf8, buf11, buf12, buf13, buf14, buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Conv_ReLU_Block(nn.Module):
def __init__(self, channel_in):
super(Conv_ReLU_Block, self).__init__()
self.conv_0 = nn.Conv2d(in_channels=channel_in, out_channels=128,
kernel_size=1, stride=1, padding=0, bias=False)
self.conv_1 = nn.Conv2d(in_channels=channel_in, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_3 = nn.Conv2d(in_channels=64, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_4 = nn.Conv2d(in_channels=96, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_5 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=1, stride=1, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
residual = x
out_0 = self.relu(self.conv_0(residual))
out_1 = self.relu(self.conv_1(x))
out_2 = self.relu(self.conv_2(out_1))
cat_1 = torch.cat((out_1, out_2), 1)
out_3 = self.relu(self.conv_3(cat_1))
cat_2 = torch.cat((cat_1, out_3), 1)
out_4 = self.relu(self.conv_4(cat_2))
cat_3 = torch.cat((cat_2, out_4), 1)
out = torch.add(out_0, cat_3)
out = self.relu(self.conv_5(out))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel_in': 4}]
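if __name__ == '__main__':
    # A minimal usage sketch: the dense stack grows 32 -> 64 -> 96 -> 128
    # channels, is added to the 1x1 shortcut conv_0, and conv_5 projects
    # the sum back to 64 channels.
    block = Conv_ReLU_Block(channel_in=4)
    out = block(torch.rand(4, 4, 4, 4))
    print(out.shape)  # -> torch.Size([4, 64, 4, 4])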
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 64
x0 = xindex % 16
x2 = xindex // 1024
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 512 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-32 + x1) + 512 * x2), tmp6, other=0.0)
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
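# triton_poi_fused_cat_1 fuses the channel concat with the ReLU on its second
# operand: the first input is already rectified, the second gets maximum(0, .)
# applied inline. Under the shapes asserted in call() below, a rough eager
# sketch (illustrative only) is:
#
#     out = torch.cat((a, torch.relu(b)), dim=1)   # two (4, 32, 4, 4) -> (4, 64, 4, 4)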
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 96
x0 = xindex % 16
x2 = xindex // 1536
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 1024 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 96, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-64 + x1) + 512 * x2), tmp6, other=0.0)
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_add_cat_relu_threshold_backward_3(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 128
x0 = xindex % 16
x2 = xindex // 2048
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = x1
tl.full([1], 0, tl.int64)
tmp6 = tl.full([1], 96, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tl.load(in_ptr1 + (x0 + 16 * x1 + 1536 * x2), tmp7, other=0.0)
tmp9 = tmp3 >= tmp6
tl.full([1], 128, tl.int64)
    tmp12 = tl.load(in_ptr2 + (x0 + 16 * (-96 + x1) + 512 * x2), tmp9, other=0.0)
tmp13 = triton_helpers.maximum(tmp1, tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp7, tmp8, tmp15)
tmp17 = tmp2 + tmp16
tmp18 = 0.0
tmp19 = tmp2 <= tmp18
tl.store(out_ptr0 + x3, tmp17, None)
tl.store(out_ptr1 + x3, tmp19, None)
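# This kernel fuses three source ops: it computes relu(a) + cat((b, relu(c)),
# dim=1) and also stores the boolean mask (relu(a) <= 0) that
# threshold_backward consumes during autograd. A hedged eager sketch under the
# shapes asserted in call():
#
#     act = torch.relu(a)                            # a: (4, 128, 4, 4)
#     out = act + torch.cat((b, torch.relu(c)), 1)   # b: (4, 96, 4, 4), c: (4, 32, 4, 4)
#     mask = act <= 0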
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, None)
tl.store(out_ptr0 + x0, tmp4, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + x0, tmp4, None)
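# Kernels 4 and 5 differ only in what they write back: kernel 4 stores the
# rectified value in place *and* the (x <= 0) mask, while this kernel emits
# only the mask, recomputing the ReLU instead of materializing it. Eager
# sketch (illustrative): mask = torch.relu(x) <= 0.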
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_6, (32, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_7, (64, 128, 1, 1), (128, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 128, 4, 4), (2048, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 4, 4), (512, 16, 4, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_relu_0[grid(2048)](buf2, 2048, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
        buf4 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32)
        triton_poi_fused_cat_1[grid(4096)](buf2, buf3, buf4, 4096, XBLOCK=256, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 4, 4), (512, 16, 4, 1))
        buf6 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.float32)
        triton_poi_fused_cat_2[grid(6144)](buf4, buf5, buf6, 6144, XBLOCK=128, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 32, 4, 4), (512, 16, 4, 1))
        buf8 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
        buf15 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool)
triton_poi_fused_add_cat_relu_threshold_backward_3[grid(8192)](buf0,
buf6, buf7, buf8, buf15, 8192, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
buf9 = extern_kernels.convolution(buf8, primals_7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 4, 4), (1024, 16, 4, 1))
buf10 = buf9
del buf9
buf11 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(4096)](buf10, buf11,
4096, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(2048)](buf7, buf12,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf7
buf13 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(2048)](buf5, buf13,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf5
buf14 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(2048)](buf3, buf14,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
return (buf10, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, buf2, buf4, buf6, buf8, buf11, buf12, buf13,
buf14, buf15)
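# call() returns the forward result first (buf10, the ReLU'd output of
# conv_5) followed by the tensors Inductor saves for the backward pass: the
# weights, the intermediate activations buf2/buf4/buf6/buf8, and the boolean
# ReLU masks buf11..buf15.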
class Conv_ReLU_BlockNew(nn.Module):
def __init__(self, channel_in):
super(Conv_ReLU_BlockNew, self).__init__()
self.conv_0 = nn.Conv2d(in_channels=channel_in, out_channels=128,
kernel_size=1, stride=1, padding=0, bias=False)
self.conv_1 = nn.Conv2d(in_channels=channel_in, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_3 = nn.Conv2d(in_channels=64, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_4 = nn.Conv2d(in_channels=96, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_5 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=1, stride=1, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_2 = self.conv_0.weight
primals_3 = self.conv_1.weight
primals_4 = self.conv_2.weight
primals_5 = self.conv_3.weight
primals_6 = self.conv_4.weight
primals_7 = self.conv_5.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
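# Usage sketch (illustrative only; the shapes follow get_inputs() above):
#
#     m = Conv_ReLU_BlockNew(channel_in=4).cuda()
#     y = m(torch.rand(4, 4, 4, 4, device='cuda'))
#     assert y.shape == (4, 64, 4, 4)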
| ypf780732/multi-staged-fusion-sr | Conv_ReLU_Block | false | 13,160 | ["MIT"] | 0 | 83d82c4310cc9314544793dc0b299a34956044e0 | https://github.com/ypf780732/multi-staged-fusion-sr/tree/83d82c4310cc9314544793dc0b299a34956044e0 |
RouterGAPwithDoubleConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/dn/cdn6cpjk3yoqht7jm6pdvdb52rl2cfnpkjmaufbqmql33n7ucc5f.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/x2/cx2iz3anhrm6wpvn3s52rgilt7mkf7y2otuesv56te64bq6rdk5m.py
# Topologically Sorted Source Nodes: [conv2d_1, out_1, mean, mean_1], Original ATen: [aten.convolution, aten.relu, aten.mean]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# mean => mean
# mean_1 => mean_1
# out_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_1, [-1]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [-1]), kwargs = {})
triton_poi_fused_convolution_mean_relu_1 = async_compile.triton('triton_poi_fused_convolution_mean_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mean_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mean_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + (16*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (16*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (3 + (16*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (4 + (16*x2)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (5 + (16*x2)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (6 + (16*x2)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (7 + (16*x2)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr0 + (8 + (16*x2)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (9 + (16*x2)), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (10 + (16*x2)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (11 + (16*x2)), xmask, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr0 + (12 + (16*x2)), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (13 + (16*x2)), xmask, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr0 + (14 + (16*x2)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr0 + (15 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = tmp4 + tmp7
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp3, tmp10)
tmp12 = tmp8 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = tmp12 + tmp15
tmp17 = 4.0
tmp18 = tmp16 / tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp22 + tmp1
tmp24 = triton_helpers.maximum(tmp3, tmp23)
tmp25 = tmp21 + tmp24
tmp27 = tmp26 + tmp1
tmp28 = triton_helpers.maximum(tmp3, tmp27)
tmp29 = tmp25 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = triton_helpers.maximum(tmp3, tmp31)
tmp33 = tmp29 + tmp32
tmp34 = tmp33 / tmp17
tmp35 = tmp18 + tmp34
tmp37 = tmp36 + tmp1
tmp38 = triton_helpers.maximum(tmp3, tmp37)
tmp40 = tmp39 + tmp1
tmp41 = triton_helpers.maximum(tmp3, tmp40)
tmp42 = tmp38 + tmp41
tmp44 = tmp43 + tmp1
tmp45 = triton_helpers.maximum(tmp3, tmp44)
tmp46 = tmp42 + tmp45
tmp48 = tmp47 + tmp1
tmp49 = triton_helpers.maximum(tmp3, tmp48)
tmp50 = tmp46 + tmp49
tmp51 = tmp50 / tmp17
tmp52 = tmp35 + tmp51
tmp54 = tmp53 + tmp1
tmp55 = triton_helpers.maximum(tmp3, tmp54)
tmp57 = tmp56 + tmp1
tmp58 = triton_helpers.maximum(tmp3, tmp57)
tmp59 = tmp55 + tmp58
tmp61 = tmp60 + tmp1
tmp62 = triton_helpers.maximum(tmp3, tmp61)
tmp63 = tmp59 + tmp62
tmp65 = tmp64 + tmp1
tmp66 = triton_helpers.maximum(tmp3, tmp65)
tmp67 = tmp63 + tmp66
tmp68 = tmp67 / tmp17
tmp69 = tmp52 + tmp68
tmp70 = tmp69 / tmp17
tl.store(out_ptr0 + (x2), tmp70, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/b7/cb7iq44xucvx4o4uio3etz5hrrkllxx5igr3vjyglpwcku6mi232.py
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# out_4 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze_1,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/bu/cbuiic3wueusipbnuffo3nyd3ip3euzqfzpih4f5w2s5ov2tfaqc.py
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# out_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (1, 32), (32, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 2048, grid=grid(2048), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1))
buf3 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, out_1, mean, mean_1], Original ATen: [aten.convolution, aten.relu, aten.mean]
triton_poi_fused_convolution_mean_relu_1.run(buf2, primals_5, buf3, 128, grid=grid(128), stream=stream0)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (32, 1), (1, 32), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, ), (1, ), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf5, primals_7, 4, grid=grid(4), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_3.run(buf2, primals_5, buf6, 2048, grid=grid(2048), stream=stream0)
del buf2
del primals_5
return (buf5, primals_1, primals_3, primals_4, buf1, buf3, buf5, primals_6, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import warnings
import torch.nn as nn
class RouterGAPwithDoubleConv(nn.Module):
""" 2 x (Convolution + Relu) + Global Average Pooling + FC + Sigmoid """
def __init__(self, input_nc, input_width, input_height, ngf=32,
kernel_size=3, soft_decision=True, stochastic=False, **kwargs):
super(RouterGAPwithDoubleConv, self).__init__()
self.ngf = ngf
self.soft_decision = soft_decision
self.stochastic = stochastic
if max(input_width, input_height) < kernel_size:
warnings.warn('Router kernel too large, shrink it')
kernel_size = max(input_width, input_height)
if max(input_width, input_height) % 2 == 0:
kernel_size += 1
padding = (kernel_size - 1) // 2
self.conv1 = nn.Conv2d(input_nc, ngf, kernel_size=kernel_size,
padding=padding)
self.conv2 = nn.Conv2d(ngf, ngf, kernel_size=kernel_size, padding=
padding)
self.relu = nn.ReLU(inplace=True)
self.linear1 = nn.Linear(ngf, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.relu(self.conv1(x))
out = self.relu(self.conv2(out))
out = out.mean(dim=-1).mean(dim=-1).squeeze()
out = self.linear1(out).squeeze()
out = self.output_controller(out)
return out
def output_controller(self, x):
if self.soft_decision:
return self.sigmoid(x)
        if self.stochastic:
            x = self.sigmoid(x)
            # NOTE: `ops` is never imported in this snippet; in the source
            # repo it presumably provides the straight-through indicator
            # estimators used below. With the default soft_decision=True
            # these branches are unreachable.
            return ops.ST_StochasticIndicator()(x)
        else:
            x = self.sigmoid(x)
            return ops.ST_Indicator()(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import warnings
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
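# This kernel folds the conv2d bias-add into the ReLU. The index expression
# `xindex // 16 % 32` recovers the channel (16 = H*W, 32 = C) so the
# per-channel bias broadcasts correctly. Eager sketch (illustrative):
#
#     out = torch.relu(conv_out + bias.view(1, -1, 1, 1))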
@triton.jit
def triton_poi_fused_convolution_mean_relu_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
    tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp43 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp53 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp60 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp64 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = tmp4 + tmp7
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp3, tmp10)
tmp12 = tmp8 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = tmp12 + tmp15
tmp17 = 4.0
tmp18 = tmp16 / tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp22 + tmp1
tmp24 = triton_helpers.maximum(tmp3, tmp23)
tmp25 = tmp21 + tmp24
tmp27 = tmp26 + tmp1
tmp28 = triton_helpers.maximum(tmp3, tmp27)
tmp29 = tmp25 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = triton_helpers.maximum(tmp3, tmp31)
tmp33 = tmp29 + tmp32
tmp34 = tmp33 / tmp17
tmp35 = tmp18 + tmp34
tmp37 = tmp36 + tmp1
tmp38 = triton_helpers.maximum(tmp3, tmp37)
tmp40 = tmp39 + tmp1
tmp41 = triton_helpers.maximum(tmp3, tmp40)
tmp42 = tmp38 + tmp41
tmp44 = tmp43 + tmp1
tmp45 = triton_helpers.maximum(tmp3, tmp44)
tmp46 = tmp42 + tmp45
tmp48 = tmp47 + tmp1
tmp49 = triton_helpers.maximum(tmp3, tmp48)
tmp50 = tmp46 + tmp49
tmp51 = tmp50 / tmp17
tmp52 = tmp35 + tmp51
tmp54 = tmp53 + tmp1
tmp55 = triton_helpers.maximum(tmp3, tmp54)
tmp57 = tmp56 + tmp1
tmp58 = triton_helpers.maximum(tmp3, tmp57)
tmp59 = tmp55 + tmp58
tmp61 = tmp60 + tmp1
tmp62 = triton_helpers.maximum(tmp3, tmp61)
tmp63 = tmp59 + tmp62
tmp65 = tmp64 + tmp1
tmp66 = triton_helpers.maximum(tmp3, tmp65)
tmp67 = tmp63 + tmp66
tmp68 = tmp67 / tmp17
tmp69 = tmp52 + tmp68
tmp70 = tmp69 / tmp17
tl.store(out_ptr0 + x2, tmp70, xmask)
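# The kernel above is a fully unrolled global average pool: all H*W = 16
# spatial positions are loaded per output element, fused with the conv
# bias-add and ReLU. The repeated / 4.0 divisions mirror the eager module's
# out.mean(dim=-1).mean(dim=-1). Sketch (illustrative):
#
#     pooled = torch.relu(x + b.view(1, -1, 1, 1)).mean(dim=-1).mean(dim=-1)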
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
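# The sigmoid kernel broadcasts the single-element linear bias (in_ptr0 + 0)
# over the batch and applies the gate in place: roughly
# torch.sigmoid(logits + bias) for a (4,) logits vector.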
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (1, 32), (32, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(2048)](buf1, primals_2,
2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1))
buf3 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
triton_poi_fused_convolution_mean_relu_1[grid(128)](buf2, primals_5,
buf3, 128, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (32, 1), (1,
32), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4,), (1,), 0)
del buf4
triton_poi_fused_sigmoid_2[grid(4)](buf5, primals_7, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_3[grid(2048)](buf2, primals_5, buf6, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
del primals_5
return (buf5, primals_1, primals_3, primals_4, buf1, buf3, buf5,
primals_6, buf6)
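# As in the previous module, call() returns the routing output first (buf5,
# the sigmoid probabilities of shape (4,)) followed by the tensors saved for
# the backward pass, including the ReLU mask buf6.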
class RouterGAPwithDoubleConvNew(nn.Module):
""" 2 x (Convolution + Relu) + Global Average Pooling + FC + Sigmoid """
def __init__(self, input_nc, input_width, input_height, ngf=32,
kernel_size=3, soft_decision=True, stochastic=False, **kwargs):
super(RouterGAPwithDoubleConvNew, self).__init__()
self.ngf = ngf
self.soft_decision = soft_decision
self.stochastic = stochastic
if max(input_width, input_height) < kernel_size:
warnings.warn('Router kernel too large, shrink it')
kernel_size = max(input_width, input_height)
if max(input_width, input_height) % 2 == 0:
kernel_size += 1
padding = (kernel_size - 1) // 2
self.conv1 = nn.Conv2d(input_nc, ngf, kernel_size=kernel_size,
padding=padding)
self.conv2 = nn.Conv2d(ngf, ngf, kernel_size=kernel_size, padding=
padding)
self.relu = nn.ReLU(inplace=True)
self.linear1 = nn.Linear(ngf, 1)
self.sigmoid = nn.Sigmoid()
def output_controller(self, x):
if self.soft_decision:
return self.sigmoid(x)
        if self.stochastic:
            x = self.sigmoid(x)
            # NOTE: as in the original module above, `ops` is never imported
            # here; it presumably comes from the source repo. These branches
            # are unreachable while soft_decision=True.
            return ops.ST_StochasticIndicator()(x)
        else:
            x = self.sigmoid(x)
            return ops.ST_Indicator()(x)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.linear1.weight
primals_7 = self.linear1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| yulinfeng000/AdaptiveNeuralTrees | RouterGAPwithDoubleConv | false | 13,161 | ["MIT"] | 0 | bbcb381b9cb0c91ae1af33ce43b43f352055041c | https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c |
Attention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/pd/cpdi7xzvi46tuzqsv24tk5nffrrhhudrzinef6vmc6ndavfymp7f.py
# Topologically Sorted Source Nodes: [norm, hl], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# hl => div
# norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {})
triton_poi_fused_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
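# This kernel L2-normalizes the last dimension, which is hard-coded to size 4
# (hence the four explicit loads feeding the sum of squares). Eager sketch
# (illustrative):
#
#     h = x / x.norm(dim=-1, keepdim=True)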
# kernel path: runs/run_shard_9/inductor_cache/3g/c3g2xnr7p4cbhacasonntgrvynvr64hbb272af6jiya7rhz46xns.py
# Topologically Sorted Source Nodes: [mul, a], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# a => sum_3
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %sum_3 : [num_users=4] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
triton_poi_fused_mul_sum_1 = async_compile.triton('triton_poi_fused_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
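# Reading off the index math (in_ptr0 at 4*x0 + 64*x2, in_ptr1 at 4*x3), this
# kernel computes a[b, p, q, r] = sum_k hl[b, q, r, k] * hr[p, q, r, k], i.e.
# the pairwise dot products of the two L2-normalized inputs. A hedged eager
# sketch:
#
#     a = (hl.unsqueeze(1) * hr.unsqueeze(0)).sum(-1)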
# kernel path: runs/run_shard_9/inductor_cache/mq/cmqahujif54y4dzyjdoqbbzjdeklfrbzydsuai72657vak5uxeqt.py
# Topologically Sorted Source Nodes: [softmax, softmax_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# softmax_1 => amax_1, exp_1, sub_2
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sum_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sum_3, [0], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp17 = tmp0 - tmp16
tmp18 = tl_math.exp(tmp17)
tl.store(out_ptr0 + (x4), tmp9, xmask)
tl.store(out_ptr1 + (x4), tmp18, xmask)
''', device_str='cuda')
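# A single pass produces the numerically stable exponentials for two
# different softmaxes over the same scores: one reduced over dim=1 (tmp9) and
# one over dim=0 (tmp18). The normalizing sums are applied later, in
# triton_poi_fused_clone_3 and triton_poi_fused__softmax_5 respectively.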
# kernel path: runs/run_shard_9/inductor_cache/dw/cdwjbah2w7dauf653ig7r3whibssom43mnarvertuhsxmywarcdi.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16) % 4
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2) + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/k6/ck6okkysq35fc3ftprq4vuqov2p55mrn5uufm3e24zb6rhnefmfu.py
# Topologically Sorted Source Nodes: [mu_lr], Original ATen: [aten.sub]
# Source node to ATen node mapping:
# mu_lr => sub_1
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_1, %view_2), kwargs = {})
triton_poi_fused_sub_4 = async_compile.triton('triton_poi_fused_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_out_ptr0 + (x0), xmask)
tmp2 = tmp0 - tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ib/cibwnelpailvamp6rr5gdso6jywzchy57ymynnhu4mqffjuom3s5.py
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax_1 => div_3, sum_5
# Graph fragment:
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [0], True), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_5), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/wv/cwvsc5zxnixkw7ne7n3iefdqexjmijhau2mb5u7qeepchbauk55m.py
# Topologically Sorted Source Nodes: [mu_rl], Original ATen: [aten.sub]
# Source node to ATen node mapping:
# mu_rl => sub_3
# Graph fragment:
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %view_5), kwargs = {})
triton_poi_fused_sub_6 = async_compile.triton('triton_poi_fused_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 - tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, hl], Original ATen: [aten.linalg_vector_norm, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_1, hr], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_poi_fused_div_linalg_vector_norm_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, a], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_1.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, buf7, 256, grid=grid(256), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [mu_lr], Original ATen: [aten.sub]
triton_poi_fused_sub_4.run(buf6, buf1, 256, grid=grid(256), stream=stream0)
buf8 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf9)
del buf1
del buf8
buf10 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mu_rl], Original ATen: [aten.sub]
triton_poi_fused_sub_6.run(buf10, buf9, 256, grid=grid(256), stream=stream0)
del buf9
return (buf6, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class Attention(torch.nn.Module):
def __init__(self):
super(Attention, self).__init__()
def forward(self, hl, hr):
hl = hl / hl.norm(dim=-1, keepdim=True)
hr = hr / hr.norm(dim=-1, keepdim=True)
a = (hl[:, None, :] * hr[None, :, :]).sum(dim=-1)
mu_lr = hr - a.softmax(dim=1).transpose(1, 0) @ hl
mu_rl = hl - a.softmax(dim=0) @ hr
return mu_lr, mu_rl
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
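# Editor's usage sketch (addition, not from the source repo): a quick smoke
# test of the eager module on the shapes returned by get_inputs(). The shape
# assertions are my reading of the broadcasting in forward().
if __name__ == "__main__":
    attn = Attention()
    hl, hr = get_inputs()
    mu_lr, mu_rl = attn(hl, hr)
    assert mu_lr.shape == hr.shape and mu_rl.shape == hl.shape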
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
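# Editor's note (inferred from the index arithmetic above): this kernel fuses
# x / x.norm(dim=-1, keepdim=True) for a contiguous last dimension of size 4;
# each lane reloads the four elements of its row, accumulates the sum of
# squares, takes the square root, and divides its own element by that norm.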
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex % 64
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp17 = tmp0 - tmp16
tmp18 = tl_math.exp(tmp17)
tl.store(out_ptr0 + x4, tmp9, xmask)
tl.store(out_ptr1 + x4, tmp18, xmask)
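# Editor's note (inferred from the two max-reduction chains above): this kernel
# computes the numerically stable numerators exp(x - max) for both softmax
# calls in the reference forward() at once -- out_ptr0 reduces over the
# stride-16 axis (softmax(dim=1)) and out_ptr1 over the stride-64 axis
# (softmax(dim=0)); the matching sum/divide stages run in the kernels below.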
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_sub_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tmp0 - tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
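# Editor's note (assumption): this kernel is the divide stage of
# softmax(dim=0) -- each numerator is divided by the sum of the four
# numerators that share its position modulo 64 (i.e. along the stride-64 axis).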
@triton.jit
def triton_poi_fused_sub_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 - tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_linalg_vector_norm_0[grid(256)](arg1_1, buf1,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_1[grid(256)](buf0, buf1, buf2, 256, XBLOCK
=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, buf7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused_clone_3[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_sub_4[grid(256)](buf6, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf4
del buf4
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0)
del buf7
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf9)
del buf1
del buf8
buf10 = buf0
del buf0
triton_poi_fused_sub_6[grid(256)](buf10, buf9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf9
return buf6, buf10
class AttentionNew(torch.nn.Module):
def __init__(self):
super(AttentionNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
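# Editor's parity sketch (addition, requires a CUDA device): re-derives the
# eager math from the reference forward() inline and compares it against the
# compiled call() outputs. Illustrative only; the tolerance is an assumption.
def _parity_check(atol=1e-5):
    if not torch.cuda.is_available():
        return
    torch.manual_seed(0)
    hl = torch.rand(4, 4, 4, 4, device='cuda')
    hr = torch.rand(4, 4, 4, 4, device='cuda')
    hln = hl / hl.norm(dim=-1, keepdim=True)
    hrn = hr / hr.norm(dim=-1, keepdim=True)
    a = (hln[:, None, :] * hrn[None, :, :]).sum(dim=-1)
    ref_lr = hrn - a.softmax(dim=1).transpose(1, 0) @ hln
    ref_rl = hln - a.softmax(dim=0) @ hrn
    got_lr, got_rl = AttentionNew()(hl, hr)
    assert torch.allclose(ref_lr, got_lr, atol=atol)
    assert torch.allclose(ref_rl, got_rl, atol=atol)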
| yuanqing-wang/graca | Attention | false | 13,162 | [
"MIT"
]
| 0 | 6934e3cfe219a7f866b1f9e4ebcc107d76b47585 | https://github.com/yuanqing-wang/graca/tree/6934e3cfe219a7f866b1f9e4ebcc107d76b47585 |
MLP_AlexNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/hy/chyn4ucj7uhqavrcrhxk2c5izzfdiw63bn3glmpyn3tpx5bpigdc.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/o5/co5xgxhjhlo7bknaeay7cl5wcvnoj7iz2upia7zbzhelekrx6iym.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_1 = async_compile.triton('triton_per_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 64), (64, 1))
assert_size_stride(primals_3, (128, ), (1, ))
assert_size_stride(primals_4, (10, 128), (128, 1))
assert_size_stride(primals_5, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 128), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 512, grid=grid(512), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_1.run(buf2, buf5, 4, 10, grid=grid(4), stream=stream0)
del buf2
return (buf5, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf1, buf5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_AlexNet(nn.Module):
""" The last fully connected part of LeNet MNIST:
https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet.prototxt
"""
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, **kwargs):
super(MLP_AlexNet, self).__init__()
self.dropout_prob = dropout_prob
ngf = input_nc * input_width * input_height
self.fc1 = nn.Linear(ngf, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training, p=self.dropout_prob)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
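# Editor's usage sketch (addition): ngf = 4 * 4 * 4 = 64, so fc1 maps 64 -> 128
# and the output is a (4, 10) tensor of log-probabilities.
if __name__ == "__main__":
    model = MLP_AlexNet(input_nc=4, input_width=4, input_height=4)
    assert model(get_inputs()[0]).shape == (4, 10)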
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
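# Editor's reference sketch (assumption: the reduction axis is dim=1, matching
# the amax over [1] in the captured graph): the kernel above is the standard
# numerically stable log-softmax, (x - max) - log(sum(exp(x - max))).
def _ref_log_softmax(x):
    shifted = x - x.max(dim=1, keepdim=True).values
    return shifted - shifted.exp().sum(dim=1, keepdim=True).log()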
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 64), (64, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (10, 128), (128, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), reinterpret_tensor(primals_2, (64, 128), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(512)](buf1, primals_3, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(128, 10), (1, 128), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_1[grid(4)](buf2, buf5, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf2
return buf5, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), buf1, buf5, primals_4
class MLP_AlexNetNew(nn.Module):
""" The last fully connected part of LeNet MNIST:
https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet.prototxt
"""
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, **kwargs):
super(MLP_AlexNetNew, self).__init__()
self.dropout_prob = dropout_prob
ngf = input_nc * input_width * input_height
self.fc1 = nn.Linear(ngf, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| yulinfeng000/AdaptiveNeuralTrees | MLP_AlexNet | false | 13,163 | [
"MIT"
]
| 0 | bbcb381b9cb0c91ae1af33ce43b43f352055041c | https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c |
MLP_LeNetMNIST | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/zr/czrdc5wxxp4nwl2wso32oylwmr7dwxgtcls32ts2v7sz2hj2677b.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 40
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/o5/co5xgxhjhlo7bknaeay7cl5wcvnoj7iz2upia7zbzhelekrx6iym.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_1 = async_compile.triton('triton_per_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (40, 64), (64, 1))
assert_size_stride(primals_3, (40, ), (1, ))
assert_size_stride(primals_4, (10, 40), (40, 1))
assert_size_stride(primals_5, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 40), (40, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 40), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 160, grid=grid(160), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (40, 10), (1, 40), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_1.run(buf2, buf5, 4, 10, grid=grid(4), stream=stream0)
del buf2
return (buf5, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf1, buf5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((40, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((40, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 40), (40, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_LeNetMNIST(nn.Module):
""" The last fully connected part of LeNet MNIST:
https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet.prototxt
"""
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, **kwargs):
super(MLP_LeNetMNIST, self).__init__()
self.dropout_prob = dropout_prob
ngf = input_nc * input_width * input_height
self.fc1 = nn.Linear(ngf, int(round(ngf / 1.6)))
self.fc2 = nn.Linear(int(round(ngf / 1.6)), 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training, p=self.dropout_prob)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
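# Editor's usage sketch (addition): with input_nc = input_width =
# input_height = 4, ngf = 64 and the hidden width is round(64 / 1.6) = 40,
# matching the (40, 64) fc1 weight asserted in the compiled wrapper above.
if __name__ == "__main__":
    model = MLP_LeNetMNIST(input_nc=4, input_width=4, input_height=4)
    assert model.fc1.weight.shape == (40, 64)
    assert model(get_inputs()[0]).shape == (4, 10)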
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 40
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (40, 64), (64, 1))
assert_size_stride(primals_3, (40,), (1,))
assert_size_stride(primals_4, (10, 40), (40, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 40), (40, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), reinterpret_tensor(primals_2, (64, 40), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(160)](buf1, primals_3, 160, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(40, 10), (1, 40), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_1[grid(4)](buf2, buf5, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf2
return buf5, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), buf1, buf5, primals_4
class MLP_LeNetMNISTNew(nn.Module):
""" The last fully connected part of LeNet MNIST:
https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet.prototxt
"""
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, **kwargs):
super(MLP_LeNetMNISTNew, self).__init__()
self.dropout_prob = dropout_prob
ngf = input_nc * input_width * input_height
self.fc1 = nn.Linear(ngf, int(round(ngf / 1.6)))
self.fc2 = nn.Linear(int(round(ngf / 1.6)), 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| yulinfeng000/AdaptiveNeuralTrees | MLP_LeNetMNIST | false | 13,164 | [
"MIT"
]
| 0 | bbcb381b9cb0c91ae1af33ce43b43f352055041c | https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c |
LRN | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/5u/c5u7jgtbilvlp5oee3xzrm3bmkras7mb3t7afiv32pyh7z4mtdml.py
# Topologically Sorted Source Nodes: [div, div_1, mul, add, div_2, x], Original ATen: [aten.pow, aten.avg_pool2d, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# div => pow_1
# div_1 => avg_pool2d
# div_2 => pow_2
# mul => mul
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [1, 1], [1, 1]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1.0), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {})
triton_poi_fused_add_avg_pool2d_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_avg_pool2d_div_mul_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_div_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 + tmp2
tmp6 = 0.75
tmp7 = libdevice.pow(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [div, div_1, mul, add, div_2, x], Original ATen: [aten.pow, aten.avg_pool2d, aten.mul, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_avg_pool2d_div_mul_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LRN(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=
False):
super(LRN, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
x = x.div(div)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
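# Editor's reference sketch (assumption: the default local_size=1,
# ACROSS_CHANNELS=False path, which is what the fused kernel encodes): a 1x1
# average pool is the identity, so the normalization degenerates to the
# element-wise map x / (1 + alpha * x^2) ** beta.
def _ref_lrn_local1(x, alpha=1.0, beta=0.75):
    return x / (1.0 + alpha * x.pow(2)).pow(beta)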
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 + tmp2
tmp6 = 0.75
tmp7 = libdevice.pow(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_avg_pool2d_div_mul_pow_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class LRNNew(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=
False):
super(LRNNew, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| zenghui9977/AFL | LRN | false | 13,165 | [
"MIT"
]
| 0 | 769d78be94ce8f80d376aceb2de9dc5a9d20a807 | https://github.com/zenghui9977/AFL/tree/769d78be94ce8f80d376aceb2de9dc5a9d20a807 |
SimpleMLPGen_with_meta_feature | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/mi/cmi476zw6ohnah2zxegdqc6fvw4ui6ahcdpt576v245i6ehle2hj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# x => expm1, gt, mul, mul_2, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_elu_0.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 1), (1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 1), (1, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.optim
import torch.jit
import torch.nn as nn
class SimpleMLPGen_with_meta_feature(nn.Module):
def __init__(self, num_in_features, num_out_features, neurons_per_layer):
super(SimpleMLPGen_with_meta_feature, self).__init__()
self.l_in = nn.Linear(in_features=num_in_features, out_features=
neurons_per_layer)
self.l_out = nn.Linear(in_features=neurons_per_layer, out_features=
num_out_features)
self.act = nn.ELU()
def forward(self, x):
x = self.act(self.l_in(x))
x = self.l_out(x)
return x
def set_parameters(self, meta_in_features, simple_mlp_gen_obj):
x = simple_mlp_gen_obj.act(simple_mlp_gen_obj.l_in(meta_in_features))
x = simple_mlp_gen_obj.l_out(x)
_base = (simple_mlp_gen_obj.num_in_features * simple_mlp_gen_obj.
neurons_per_layer)
l_in_weight = x[:_base].reshape((simple_mlp_gen_obj.num_in_features,
simple_mlp_gen_obj.neurons_per_layer)).t()
l_in_bias = x[_base:_base + simple_mlp_gen_obj.neurons_per_layer]
_base += simple_mlp_gen_obj.neurons_per_layer
_base_add = (simple_mlp_gen_obj.neurons_per_layer *
simple_mlp_gen_obj.num_out_features)
l_out_weight = x[_base:_base + _base_add].reshape((
simple_mlp_gen_obj.neurons_per_layer, simple_mlp_gen_obj.
num_out_features)).t()
_base += _base_add
l_out_bias = x[_base:]
self.l_in.weight = torch.nn.Parameter(l_in_weight)
self.l_out.weight = torch.nn.Parameter(l_out_weight)
self.l_in.bias = torch.nn.Parameter(l_in_bias)
self.l_out.bias = torch.nn.Parameter(l_out_bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in_features': 4, 'num_out_features': 4,
'neurons_per_layer': 1}]
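# --- illustrative sketch, not part of the original repo ---
# Shows how the two helpers above drive the reference module; `_demo` and
# its local names are assumptions added for clarity.
def _demo():
    init_args, init_kwargs = get_init_inputs()
    model = SimpleMLPGen_with_meta_feature(*init_args, **init_kwargs)
    out = model(*get_inputs())
    return out.shape  # torch.Size([4, 4, 4, 4])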
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.optim
import torch.jit
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
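    # ELU with alpha=1.0: pass x through where x > 0, otherwise expm1(x)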
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 1), (
1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 1), (1, 1), 0), primals_4
class SimpleMLPGen_with_meta_featureNew(nn.Module):
def __init__(self, num_in_features, num_out_features, neurons_per_layer):
super(SimpleMLPGen_with_meta_featureNew, self).__init__()
self.l_in = nn.Linear(in_features=num_in_features, out_features=
neurons_per_layer)
self.l_out = nn.Linear(in_features=neurons_per_layer, out_features=
num_out_features)
self.act = nn.ELU()
def set_parameters(self, meta_in_features, simple_mlp_gen_obj):
x = simple_mlp_gen_obj.act(simple_mlp_gen_obj.l_in(meta_in_features))
x = simple_mlp_gen_obj.l_out(x)
_base = (simple_mlp_gen_obj.num_in_features * simple_mlp_gen_obj.
neurons_per_layer)
l_in_weight = x[:_base].reshape((simple_mlp_gen_obj.num_in_features,
simple_mlp_gen_obj.neurons_per_layer)).t()
l_in_bias = x[_base:_base + simple_mlp_gen_obj.neurons_per_layer]
_base += simple_mlp_gen_obj.neurons_per_layer
_base_add = (simple_mlp_gen_obj.neurons_per_layer *
simple_mlp_gen_obj.num_out_features)
l_out_weight = x[_base:_base + _base_add].reshape((
simple_mlp_gen_obj.neurons_per_layer, simple_mlp_gen_obj.
num_out_features)).t()
_base += _base_add
l_out_bias = x[_base:]
self.l_in.weight = torch.nn.Parameter(l_in_weight)
self.l_out.weight = torch.nn.Parameter(l_out_weight)
self.l_in.bias = torch.nn.Parameter(l_in_bias)
self.l_out.bias = torch.nn.Parameter(l_out_bias)
def forward(self, input_0):
primals_1 = self.l_in.weight
primals_2 = self.l_in.bias
primals_4 = self.l_out.weight
primals_5 = self.l_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| zhaofeng-shu33/deep_euler_tests | SimpleMLPGen_with_meta_feature | false | 13,166 | [
"MIT"
]
| 0 | a3d0961af679d490b0c58873ee0726234122bc7a | https://github.com/zhaofeng-shu33/deep_euler_tests/tree/a3d0961af679d490b0c58873ee0726234122bc7a |
BertLMHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/p2/cp2wa74dtkm6qkosxpcuuagmtwosvyyprayglgoywwx7jl2uyxvq.py
# Topologically Sorted Source Nodes: [gelu, layer_norm], Original ATen: [aten.gelu, aten.native_layer_norm]
# Source node to ATen node mapping:
# gelu => add, erf, mul, mul_1, mul_2
# layer_norm => var_mean
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%mul_2, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_gelu_native_layer_norm_0 = async_compile.triton('triton_poi_fused_gelu_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + (x0), tmp31, xmask)
tl.store(out_ptr1 + (x0), tmp43, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/67/c67667h7wwkrh6woa5e7cshihpzrmjgaxajdc7zzwozfvvezfquq.py
# Topologically Sorted Source Nodes: [gelu, layer_norm], Original ATen: [aten.gelu, aten.native_layer_norm]
# Source node to ATen node mapping:
# gelu => add, erf, mul, mul_1, mul_2
# layer_norm => add_1, add_2, mul_3, mul_4, rsqrt, sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %getitem_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_4), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_5), kwargs = {})
triton_poi_fused_gelu_native_layer_norm_1 = async_compile.triton('triton_poi_fused_gelu_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp9 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp8 - tmp9
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp10 * tmp14
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [gelu, layer_norm], Original ATen: [aten.gelu, aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_gelu_native_layer_norm_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gelu, layer_norm], Original ATen: [aten.gelu, aten.native_layer_norm]
triton_poi_fused_gelu_native_layer_norm_1.run(buf0, buf1, buf2, primals_4, primals_5, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del buf2
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
from torch.nn import Module
import torch
from torch.nn import LayerNorm
from torch.nn import Linear
from torch.nn.functional import gelu
class BertLMHead(Module):
def __init__(self, config):
super(BertLMHead, self).__init__()
hidden_size = config['hidden_size']
self.mlp = Linear(hidden_size, hidden_size, bias=True)
self.unembedding = Linear(hidden_size, config['vocab_size'], bias=True)
self.layer_norm = LayerNorm((hidden_size,))
def forward(self, activations):
return self.unembedding(self.layer_norm(gelu(self.mlp(activations))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, vocab_size=4)}]
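# --- illustrative sketch, not part of the original repo ---
# Drives the head end to end: unembedding(layer_norm(gelu(mlp(x)))).
# `_demo` and its local names are assumptions added for clarity.
def _demo():
    init_args, init_kwargs = get_init_inputs()
    head = BertLMHead(*init_args, **init_kwargs)
    logits = head(*get_inputs())
    return logits.shape  # torch.Size([4, 4, 4, 4])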
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch.nn import LayerNorm
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
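    # One program per length-4 row: apply exact GELU, 0.5 * x * (1 + erf(x /
    # sqrt(2))), to each element, then emit the row mean (tmp31) and biased
    # variance (tmp43) consumed by the layer-norm kernel below.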
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + x0, tmp31, xmask)
tl.store(out_ptr1 + x0, tmp43, xmask)
@triton.jit
def triton_poi_fused_gelu_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
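    # Recompute GELU for this element, then normalize with the precomputed row
    # statistics: (g - mean) * rsqrt(var + 1e-05) * weight + bias.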
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp8 - tmp9
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp10 * tmp14
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_native_layer_norm_0[grid(64)](buf0, buf1,
buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_gelu_native_layer_norm_1[grid(256)](buf0, buf1,
buf2, primals_4, primals_5, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del buf2
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6
class BertLMHeadNew(Module):
def __init__(self, config):
super(BertLMHeadNew, self).__init__()
hidden_size = config['hidden_size']
self.mlp = Linear(hidden_size, hidden_size, bias=True)
self.unembedding = Linear(hidden_size, config['vocab_size'], bias=True)
self.layer_norm = LayerNorm((hidden_size,))
def forward(self, input_0):
primals_1 = self.mlp.weight
primals_2 = self.mlp.bias
        primals_6 = self.unembedding.weight
        primals_7 = self.unembedding.bias
        primals_4 = self.layer_norm.weight
        primals_5 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| yulonglin/bert | BertLMHead | false | 13,167 | [
"MIT"
]
| 0 | 7f992e88f109e4267b0e84f8398cab0561a67f4f | https://github.com/yulonglin/bert/tree/7f992e88f109e4267b0e84f8398cab0561a67f4f |
NormedResidualLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/nh/cnhx37tsffx4r7taj3xi72s7yfpnnccem24fupfbht6b7bzliavu.py
# Topologically Sorted Source Nodes: [intermediate], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# intermediate => add, erf, mul, mul_1, mul_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_gelu_0 = async_compile.triton('triton_poi_fused_gelu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ji/cji7mw45fbdoanjc5e6qu3e2bf5d6jnnjabskl6onjlk7uv7oqud.py
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# output => add_1
# output_1 => var_mean
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_1, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/xy/cxyvzp6lij7d3yqq2ut3vi6guk7xnzb7qwqb66dthlly44r65vfk.py
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# output => add_1
# output_1 => add_2, add_3, mul_3, mul_4, rsqrt, sub
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_6), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [intermediate], Original ATen: [aten.gelu]
stream0 = get_raw_stream(0)
triton_poi_fused_gelu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_1.run(buf2, primals_3, buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_2.run(buf2, primals_3, buf3, buf4, primals_6, primals_7, buf5, 256, grid=grid(256), stream=stream0)
del buf3
del buf4
del primals_7
return (buf5, primals_3, primals_6, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Linear
from torch.nn.functional import gelu
class NormedResidualLayer(Module):
def __init__(self, size, intermediate_size, dropout):
super(NormedResidualLayer, self).__init__()
self.mlp1 = Linear(size, intermediate_size, bias=True)
self.mlp2 = Linear(intermediate_size, size, bias=True)
self.layer_norm = LayerNorm((size,))
self.dropout = Dropout(dropout)
def forward(self, input):
intermediate = gelu(self.mlp1(input))
output = self.dropout(self.mlp2(intermediate)) + input
output = self.layer_norm(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4, 'intermediate_size': 4, 'dropout': 0.5}]
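# --- illustrative sketch, not part of the original repo ---
# The block computes layer_norm(dropout(mlp2(gelu(mlp1(x)))) + x); eval()
# disables dropout so the sketch is deterministic. `_demo` and its local
# names are assumptions added for clarity.
def _demo():
    init_args, init_kwargs = get_init_inputs()
    layer = NormedResidualLayer(*init_args, **init_kwargs).eval()
    out = layer(*get_inputs())
    return out.shape  # torch.Size([4, 4, 4, 4])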
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
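    # Exact GELU: 0.5 * x * (1 + erf(x / sqrt(2))); 0.7071067811865476 = 1/sqrt(2)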
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
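    # Residual add over each length-4 row, then the row mean (tmp16) and biased
    # variance (tmp28) for the subsequent layer norm.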
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
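    # Layer norm of the residual sum: (x - mean) * rsqrt(var + 1e-05) * weight + bias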
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_3,
buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2, primals_3,
buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
del buf4
del primals_7
return buf5, primals_3, primals_6, buf0, reinterpret_tensor(buf1, (64,
4), (4, 1), 0), buf2, primals_4
class NormedResidualLayerNew(Module):
def __init__(self, size, intermediate_size, dropout):
super(NormedResidualLayerNew, self).__init__()
self.mlp1 = Linear(size, intermediate_size, bias=True)
self.mlp2 = Linear(intermediate_size, size, bias=True)
self.layer_norm = LayerNorm((size,))
self.dropout = Dropout(dropout)
def forward(self, input_0):
primals_1 = self.mlp1.weight
primals_2 = self.mlp1.bias
primals_4 = self.mlp2.weight
primals_5 = self.mlp2.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| yulonglin/bert | NormedResidualLayer | false | 13,168 | [
"MIT"
]
| 0 | 7f992e88f109e4267b0e84f8398cab0561a67f4f | https://github.com/yulonglin/bert/tree/7f992e88f109e4267b0e84f8398cab0561a67f4f |
GLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/yd/cydoiibutfe3ome7v57n5uwsr4vxzbjyuhhdscab4ea22iuzhboe.py
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class GLU(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
out, gate = x.chunk(2, dim=self.dim)
return out * gate.sigmoid()
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
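# --- illustrative sketch, not part of the original repo ---
# The module matches torch.nn.functional.glu, which also halves the chosen
# dim and gates one half with the sigmoid of the other; `_demo` is an
# assumed name.
def _demo():
    x = get_inputs()[0]
    got = GLU(dim=4)(x)
    ref = torch.nn.functional.glu(x, dim=4)
    return got.shape, torch.allclose(got, ref)  # (torch.Size([4, 4, 4, 4, 2]), True)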
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
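    # Halve the last dim (size 4): out = x[..., :2] * sigmoid(x[..., 2:])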
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(512)](arg0_1, buf0, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GLUNew(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| zhengx18/conformer | GLU | false | 13,169 | [
"MIT"
]
| 0 | a258c0b0cc70034f53d2b2040badf5d58aab95bc | https://github.com/zhengx18/conformer/tree/a258c0b0cc70034f53d2b2040badf5d58aab95bc |
TripletLogExpLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/a2/ca2u3chgeorw6tm3hldz3fjzmdh6mfkgfvuaeu4xcp3hxjcv6bwn.py
# Topologically Sorted Source Nodes: [d_p, d_n, sub, exp, add, dist, loss], Original ATen: [aten.sub, aten.add, aten.norm, aten.exp, aten.log, aten.mean]
# Source node to ATen node mapping:
# add => add_2
# d_n => add_1, pow_3, pow_4, sub_1, sum_2
# d_p => add, pow_1, pow_2, sub, sum_1
# dist => log
# exp => exp
# loss => mean
# sub => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub, 1e-06), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg2_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub_1, 1e-06), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 2.0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1]), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %pow_4), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%log,), kwargs = {})
triton_per_fused_add_exp_log_mean_norm_sub_0 = async_compile.triton('triton_per_fused_add_exp_log_mean_norm_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_log_mean_norm_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_exp_log_mean_norm_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
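    # Per row r0: d_p = ||a - p + 1e-06||_2 and d_n = ||a - n + 1e-06||_2 over the
    # four features, then loss = mean(log(1 + exp(d_p - d_n))) across the batch.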
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp26 = tmp0 - tmp25
tmp27 = tmp26 + tmp3
tmp28 = tmp27 * tmp27
tmp30 = tmp6 - tmp29
tmp31 = tmp30 + tmp3
tmp32 = tmp31 * tmp31
tmp33 = tmp28 + tmp32
tmp35 = tmp12 - tmp34
tmp36 = tmp35 + tmp3
tmp37 = tmp36 * tmp36
tmp38 = tmp33 + tmp37
tmp40 = tmp18 - tmp39
tmp41 = tmp40 + tmp3
tmp42 = tmp41 * tmp41
tmp43 = tmp38 + tmp42
tmp44 = libdevice.sqrt(tmp43)
tmp45 = tmp24 - tmp44
tmp46 = tl_math.exp(tmp45)
tmp47 = 1.0
tmp48 = tmp46 + tmp47
tmp49 = tl_math.log(tmp48)
tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK])
tmp52 = tl.sum(tmp50, 1)[:, None]
tmp53 = 4.0
tmp54 = tmp52 / tmp53
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp54, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [d_p, d_n, sub, exp, add, dist, loss], Original ATen: [aten.sub, aten.add, aten.norm, aten.exp, aten.log, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_exp_log_mean_norm_sub_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 4, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class TripletLogExpLoss(nn.Module):
"""Creates a criterion that measures the triplet loss given an input
tensors x1, x2, x3.
This is used for measuring a relative similarity between samples. A triplet
is composed by `a`, `p` and `n`: anchor, positive examples and negative
example respectively. The shape of all input variables should be
:math:`(N, D)`.
The distance is described in detail in the paper `Improving Pairwise Ranking for Multi-Label
Image Classification`_ by Y. Li et al.
.. math::
L(a, p, n) = \\log \\left( 1 + \\exp(d(a_i, p_i) - d(a_i, n_i)) \\right)
Args:
anchor: anchor input tensor
positive: positive input tensor
negative: negative input tensor
Shape:
- Input: :math:`(N, D)` where `D = vector dimension`
- Output: scalar (the loss is averaged over the batch)
>>> triplet_loss = TripletLogExpLoss(p=2)
>>> input1 = torch.randn(100, 128, requires_grad=True)
>>> input2 = torch.randn(100, 128, requires_grad=True)
>>> input3 = torch.randn(100, 128, requires_grad=True)
>>> output = triplet_loss(input1, input2, input3)
>>> output.backward()
.. _Learning shallow convolutional feature descriptors with triplet losses:
http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf
"""
def __init__(self, p=2, eps=1e-06, swap=False):
super(TripletLogExpLoss, self).__init__()
self.p = p
self.eps = eps
self.swap = swap
def forward(self, anchor, positive, negative):
assert anchor.size() == positive.size(
), 'Input sizes between positive and negative must be equal.'
assert anchor.size() == negative.size(
), 'Input sizes between anchor and negative must be equal.'
assert positive.size() == negative.size(
), 'Input sizes between positive and negative must be equal.'
assert anchor.dim() == 2, 'Input must be a 2D matrix.'
d_p = F.pairwise_distance(anchor, positive, self.p, self.eps)
d_n = F.pairwise_distance(anchor, negative, self.p, self.eps)
if self.swap:
d_s = F.pairwise_distance(positive, negative, self.p, self.eps)
d_n = torch.min(d_n, d_s)
dist = torch.log(1 + torch.exp(d_p - d_n))
loss = torch.mean(dist)
return loss
def eval_func(self, dp, dn):
return np.log(1 + np.exp(dp - dn))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
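# Usage sketch (an illustrative addition, not part of the original module):
# exercises the eager loss on the same (N, D) shapes that get_inputs()
# returns; the result is a scalar mean of log(1 + exp(d_p - d_n)).
def _demo_triplet_logexp():
    torch.manual_seed(0)
    loss_fn = TripletLogExpLoss(p=2)
    anchor, positive, negative = get_inputs()
    return loss_fn(anchor, positive, negative)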
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_exp_log_mean_norm_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp26 = tmp0 - tmp25
tmp27 = tmp26 + tmp3
tmp28 = tmp27 * tmp27
tmp30 = tmp6 - tmp29
tmp31 = tmp30 + tmp3
tmp32 = tmp31 * tmp31
tmp33 = tmp28 + tmp32
tmp35 = tmp12 - tmp34
tmp36 = tmp35 + tmp3
tmp37 = tmp36 * tmp36
tmp38 = tmp33 + tmp37
tmp40 = tmp18 - tmp39
tmp41 = tmp40 + tmp3
tmp42 = tmp41 * tmp41
tmp43 = tmp38 + tmp42
tmp44 = libdevice.sqrt(tmp43)
tmp45 = tmp24 - tmp44
tmp46 = tl_math.exp(tmp45)
tmp47 = 1.0
tmp48 = tmp46 + tmp47
tmp49 = tl_math.log(tmp48)
tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK])
tmp52 = tl.sum(tmp50, 1)[:, None]
tmp53 = 4.0
tmp54 = tmp52 / tmp53
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp54, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_exp_log_mean_norm_sub_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class TripletLogExpLossNew(nn.Module):
"""Creates a criterion that measures the triplet loss given an input
tensors x1, x2, x3.
This is used for measuring a relative similarity between samples. A triplet
is composed by `a`, `p` and `n`: anchor, positive examples and negative
example respectively. The shape of all input variables should be
:math:`(N, D)`.
The distance is described in detail in the paper `Improving Pairwise Ranking for Multi-Label
Image Classification`_ by Y. Li et al.
.. math::
L(a, p, n) = \\log \\left( 1 + \\exp(d(a_i, p_i) - d(a_i, n_i)) \\right)
Args:
anchor: anchor input tensor
positive: positive input tensor
negative: negative input tensor
Shape:
- Input: :math:`(N, D)` where `D = vector dimension`
- Output: scalar (the loss is averaged over the batch)
>>> triplet_loss = TripletLogExpLossNew()
>>> input1 = torch.randn(4, 4, device='cuda')
>>> input2 = torch.randn(4, 4, device='cuda')
>>> input3 = torch.randn(4, 4, device='cuda')
>>> output = triplet_loss(input1, input2, input3)
Note: the compiled kernel is specialized to (4, 4) CUDA inputs.
.. _Learning shallow convolutional feature descriptors with triplet losses:
http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf
"""
def __init__(self, p=2, eps=1e-06, swap=False):
super(TripletLogExpLossNew, self).__init__()
self.p = p
self.eps = eps
self.swap = swap
def eval_func(self, dp, dn):
return np.log(1 + np.exp(dp - dn))
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
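# Parity sketch (an illustrative addition, assuming a CUDA device is
# available): the Triton-backed module should reproduce the eager loss;
# pairwise_distance's default eps of 1e-6 matches the fused kernel.
def _check_triplet_logexp_cuda():
    a, p, n = [torch.rand(4, 4, device='cuda') for _ in range(3)]
    d_p = torch.nn.functional.pairwise_distance(a, p)
    d_n = torch.nn.functional.pairwise_distance(a, n)
    eager = torch.log(1 + torch.exp(d_p - d_n)).mean()
    fused = TripletLogExpLossNew()(a, p, n)
    assert torch.allclose(eager, fused, atol=1e-05)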
| zhangxue123/deep-image-retrieval | TripletLogExpLoss | false | 13,170 | [
"BSD-3-Clause"
]
| 0 | ac188856fa5a034aed3f7ed3fb617d580da44462 | https://github.com/zhangxue123/deep-image-retrieval/tree/ac188856fa5a034aed3f7ed3fb617d580da44462 |
AttentionConditioningLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/4c/c4cykkdmz56vlp2wahl4jc3jl7iv36zflp6ubiyyjz5poeoferuj.py
# Topologically Sorted Source Nodes: [conv_signal, input_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_signal => convolution
# input_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [2], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/54/c54dbsignvfjagpsaupubwjr56qca2javq777mxrypcfibc4vd2y.py
# Topologically Sorted Source Nodes: [conv_signal_1, input_2], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv_signal_1 => convolution_1
# input_2 => sigmoid
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [1], [1], False, [0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 163840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 640
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 2, 5), (10, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 2, 64), (128, 64, 1))
assert_size_stride(primals_4, (640, 32, 3), (96, 3, 1))
assert_size_stride(primals_5, (640, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv_signal], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64), (2048, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv_signal, input_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 8192, grid=grid(8192), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv_signal_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 640, 64), (40960, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv_signal_1, input_2], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_1.run(buf3, primals_5, 163840, grid=grid(163840), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 2, 5), (10, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 2, 64), (128, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((640, 32, 3), (96, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((640, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init.
calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class AttentionConditioningLayer(nn.Module):
"""Adapted from the LocationLayer in https://github.com/NVIDIA/tacotron2/blob/master/model.py
1D Conv model over a concatenation of the previous attention and the accumulated attention values
"""
def __init__(self, input_dim=2, attention_n_filters=32,
attention_kernel_sizes=[5, 3], attention_dim=640):
super(AttentionConditioningLayer, self).__init__()
self.location_conv_hidden = ConvNorm(input_dim, attention_n_filters,
kernel_size=attention_kernel_sizes[0], padding=None, bias=True,
stride=1, dilation=1, w_init_gain='relu')
self.location_conv_out = ConvNorm(attention_n_filters,
attention_dim, kernel_size=attention_kernel_sizes[1], padding=
None, bias=True, stride=1, dilation=1, w_init_gain='sigmoid')
self.conv_layers = nn.Sequential(self.location_conv_hidden, nn.ReLU
(), self.location_conv_out, nn.Sigmoid())
def forward(self, attention_weights_cat):
return self.conv_layers(attention_weights_cat)
def get_inputs():
return [torch.rand([4, 2, 64])]
def get_init_inputs():
return [[], {}]
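# Shape check (an illustrative addition): the layer maps a (B, 2, T) stack
# of previous and accumulated attention weights to (B, 640, T) sigmoid
# gates, preserving the time dimension via same-padding convolutions.
def _demo_attention_conditioning():
    layer = AttentionConditioningLayer()
    x, = get_inputs()  # (4, 2, 64)
    out = layer(x)
    assert out.shape == (4, 640, 64)
    return out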
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 640
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 2, 5), (10, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 2, 64), (128, 64, 1))
assert_size_stride(primals_4, (640, 32, 3), (96, 3, 1))
assert_size_stride(primals_5, (640,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(2,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64), (2048, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(8192)](buf1, primals_2,
8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 640, 64), (40960, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_sigmoid_1[grid(163840)](buf3,
primals_5, 163840, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf3
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init.
calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class AttentionConditioningLayerNew(nn.Module):
"""Adapted from the LocationLayer in https://github.com/NVIDIA/tacotron2/blob/master/model.py
1D Conv model over a concatenation of the previous attention and the accumulated attention values
"""
def __init__(self, input_dim=2, attention_n_filters=32,
attention_kernel_sizes=[5, 3], attention_dim=640):
super(AttentionConditioningLayerNew, self).__init__()
self.location_conv_hidden = ConvNorm(input_dim, attention_n_filters,
kernel_size=attention_kernel_sizes[0], padding=None, bias=True,
stride=1, dilation=1, w_init_gain='relu')
self.location_conv_out = ConvNorm(attention_n_filters,
attention_dim, kernel_size=attention_kernel_sizes[1], padding=
None, bias=True, stride=1, dilation=1, w_init_gain='sigmoid')
self.conv_layers = nn.Sequential(self.location_conv_hidden, nn.ReLU
(), self.location_conv_out, nn.Sigmoid())
def forward(self, input_0):
primals_1 = self.location_conv_hidden.conv.weight
primals_2 = self.location_conv_hidden.conv.bias
primals_4 = self.location_conv_out.conv.weight
primals_5 = self.location_conv_out.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
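# Usage sketch (an illustrative addition, assuming a CUDA device): the
# compiled module is a drop-in replacement for the eager layer on GPU,
# with the convolutions dispatched to extern kernels and the bias+ReLU
# and bias+sigmoid epilogues fused into the Triton kernels above.
def _demo_compiled_conditioning():
    layer = AttentionConditioningLayerNew().cuda()
    out = layer(torch.rand(4, 2, 64, device='cuda'))
    assert out.shape == (4, 640, 64)
    return out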
| zachwe/flowtron | AttentionConditioningLayer | false | 13,171 | [
"Apache-2.0"
]
| 0 | 28da7fbdb8c2851c835a355ae5cce45cc30bbc84 | https://github.com/zachwe/flowtron/tree/28da7fbdb8c2851c835a355ae5cce45cc30bbc84 |
FastSigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/p5/cp5p3gl5ql57e4neclfv4vl2iwxdlgkslachrrr4wbs4bafshsmq.py
# Topologically Sorted Source Nodes: [abs_1, abs_2, div], Original ATen: [aten.abs, aten.add, aten.div]
# Source node to ATen node mapping:
# abs_1 => abs_1
# abs_2 => add
# div => div
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%abs_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %add), kwargs = {})
triton_poi_fused_abs_add_div_0 = async_compile.triton('triton_poi_fused_abs_add_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.abs(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [abs_1, abs_2, div], Original ATen: [aten.abs, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_add_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
import torch.nn as nn
class FastSigmoid(nn.Module):
def __init__(self):
super(FastSigmoid, self).__init__()
def forward(self, x):
abs = torch.abs(x) + 1
return torch.div(x, abs)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
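# Worked example (an illustrative addition): the fast sigmoid is
# x / (1 + |x|), a cheap bounded squashing function; f(0) = 0,
# f(1) = 0.5, and f(x) -> +/-1 as x -> +/-inf.
def _demo_fast_sigmoid():
    f = FastSigmoid()
    x = torch.tensor([-2.0, 0.0, 1.0])
    return f(x)  # tensor([-0.6667,  0.0000,  0.5000])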
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.abs(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_add_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class FastSigmoidNew(nn.Module):
def __init__(self):
super(FastSigmoidNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
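# Parity sketch (an illustrative addition, assuming a CUDA device): the
# fused abs/add/div kernel should match the eager expression.
def _check_fast_sigmoid_cuda():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = x / (x.abs() + 1)
    fused = FastSigmoidNew()(x)
    assert torch.allclose(eager, fused)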
| zhuxyme/zxySRFBN_CVPR2019 | FastSigmoid | false | 13,172 | [
"MIT"
]
| 0 | c1afe776e7759bc05f2235b6db708e337cf2ae0e | https://github.com/zhuxyme/zxySRFBN_CVPR2019/tree/c1afe776e7759bc05f2235b6db708e337cf2ae0e |
LanguageModelCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/bk/cbkgxpfet2dg3ve2mpd5dry3yx3ra22ygcjs5fecs2h77kxz2vko.py
# Topologically Sorted Source Nodes: [gather, neg, output, sum_1, sum_2, output_1], Original ATen: [aten.gather, aten.neg, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# gather => gather
# neg => neg
# output => mul
# output_1 => div
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%view, 1, %view_1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%gather,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %view_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_div_gather_mul_neg_sum_0 = async_compile.triton('triton_per_fused_div_gather_mul_neg_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_gather_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_gather_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr2 + (r0), None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp13 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [gather, neg, output, sum_1, sum_2, output_1], Original ATen: [aten.gather, aten.neg, aten.mul, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_gather_mul_neg_sum_0.run(buf2, arg1_1, arg0_1, arg2_1, 1, 16, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import *
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class LanguageModelCriterion(nn.Module):
def __init__(self):
super(LanguageModelCriterion, self).__init__()
def forward(self, input, target, mask):
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)]
input = to_contiguous(input).view(-1, input.size(2))
target = to_contiguous(target).view(-1, 1)
mask = to_contiguous(mask).view(-1, 1)
output = -input.gather(1, target) * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
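# Worked example (an illustrative addition): with float log-probabilities
# the criterion gathers the log-probability of each target token, zeroes
# out masked positions, and averages over the mask's total weight.
def _demo_lm_criterion():
    crit = LanguageModelCriterion()
    logp = torch.log_softmax(torch.randn(4, 4, 4), dim=-1)
    target = torch.randint(0, 4, (4, 4))
    mask = torch.ones(4, 4)
    return crit(logp, target, mask)  # scalar mean negative log-likelihood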
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_gather_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp13 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_gather_mul_neg_sum_0[grid(1)](buf2, arg1_1,
arg0_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class LanguageModelCriterionNew(nn.Module):
def __init__(self):
super(LanguageModelCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
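# Parity sketch (an illustrative addition, assuming a CUDA device). The
# single fused kernel reproduces gather -> negate -> mask -> mean in one
# reduction pass; note it was traced with the int64 inputs produced by
# get_inputs() above, so this check uses the same dtypes.
def _check_lm_criterion_cuda():
    logp = torch.ones(4, 4, 4, dtype=torch.int64, device='cuda')
    target = torch.ones(4, 4, dtype=torch.int64, device='cuda')
    mask = torch.rand(4, 4, device='cuda')
    gathered = -logp.reshape(16, 4).gather(1, target.reshape(16, 1)).float()
    eager = (gathered * mask.reshape(16, 1)).sum() / mask.sum()
    fused = LanguageModelCriterionNew()(logp, target, mask)
    assert torch.allclose(eager, fused)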
| zhlnhn/ImageNewsMatching | LanguageModelCriterion | false | 13,173 | [
"MIT"
]
| 0 | a9ebfc5f7669621cfc37510d6d9476a7b7a86eaa | https://github.com/zhlnhn/ImageNewsMatching/tree/a9ebfc5f7669621cfc37510d6d9476a7b7a86eaa |
L2Norm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/aq/caqr3sakwoofh553ujdgdtlagzy24ygwtyr2sratsmpvljzyoiyj.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# norm => add
# out => mul
# pow_1 => pow_1
# sqrt => sqrt
# sum_1 => sum_1
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-10), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %div), kwargs = {})
# %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%primals_1, %div), kwargs = {})
triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr2'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp15
tl.store(out_ptr0 + (x3), tmp15, xmask)
tl.store(out_ptr1 + (x3), tmp17, xmask)
tl.store(out_ptr2 + (x3), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(primals_1, primals_2, buf0, buf1, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from math import sqrt as sqrt
from itertools import product as product
import torch.nn as nn
import torch.nn.init as init
class L2Norm(nn.Module):
def __init__(self, n_channels, scale):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.weight, self.gamma)
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x /= norm
out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x
) * x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4, 'scale': 1.0}]
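# Quick check (an illustrative addition): with scale 1.0 the weights are
# all ones, so each spatial position ends up with unit L2 norm across the
# channel dimension (up to the 1e-10 epsilon).
def _demo_l2norm():
    m = L2Norm(4, 1.0)
    x, = get_inputs()
    y = m(x)
    norms = y.pow(2).sum(dim=1).sqrt()
    assert torch.allclose(norms, torch.ones_like(norms), atol=1e-04)
    return y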
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from math import sqrt as sqrt
from itertools import product as product
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp15
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp17, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_1,
primals_2, buf0, buf1, primals_1, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
del primals_2
return buf1, buf0
class L2NormNew(nn.Module):
def __init__(self, n_channels, scale):
super(L2NormNew, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.weight, self.gamma)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
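# Parity sketch (an illustrative addition, assuming a CUDA device): the
# single fused kernel reproduces normalize-then-scale; note that call()
# also writes the normalized values back into the input buffer in-place,
# hence the clone below.
def _check_l2norm_cuda():
    m = L2NormNew(4, 1.0).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    norm = x.pow(2).sum(1, keepdim=True).sqrt() + 1e-10
    eager = m.weight.view(1, 4, 1, 1) * (x / norm)
    fused = m(x.clone())
    assert torch.allclose(eager, fused, atol=1e-05)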
| zhujiagang/realtime-neg | L2Norm | false | 13,174 | [
"MIT"
]
| 0 | 7e228edc5f2d93d0eee7f3880f0b8473d8c71d27 | https://github.com/zhujiagang/realtime-neg/tree/7e228edc5f2d93d0eee7f3880f0b8473d8c71d27 |
SimpleNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/73/c73woslnebd5oxnlopq7pyfdilsu3hof2ng4ykccwru5vdtfqdzf.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
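# Reader's note (added): the kernel above fuses the linear layer's bias
# add, the ReLU, and the mask that ReLU's backward needs into one pass.
# In eager PyTorch the same two outputs would be:
#     y = torch.relu(x + bias)   # written back through in_out_ptr0
#     mask = y <= 0              # written to out_ptr0 for threshold_backward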
# kernel path: runs/run_shard_9/inductor_cache/ha/chaidymny46jjqjd472shtdzxkvunamt4xbhocbd7wa4vyf4ugx6.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.view]
# Source node to ATen node mapping:
# x_2 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 50]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 50
x1 = (xindex // 50)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (50*x1) + (200*((x1 % 4) // 4)) + (800*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
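# Editorial note: the index arithmetic in the load above is a contiguizing
# copy. For the shapes in this graph, (x1 % 4) // 4 is always 0 and the final
# term also vanishes, so the load reduces to the flat index x0 + 50 * x1: the
# (4, 4, 4, 50) activation is simply copied into the (64, 50) matrix that the
# next mm expects. A hedged eager-mode equivalent:
def _sketch_view_copy(t):
    # t is the (4, 4, 4, 50) contiguous post-ReLU activation
    return t.reshape(64, 50).contiguous()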
# kernel path: runs/run_shard_9/inductor_cache/m2/cm2cagnazxvlnfyn7xbwkbgv5xij5qlqejjhns34jacdqk32xypo.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_8,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_13, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x3 = (xindex // 1600)
x5 = xindex % 1600
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x5 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
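# Editorial note: unlike the first kernel, the boolean mask here is written
# with an outer stride of 1664 rather than the dense 1600 (see the
# `x5 + (1664*x3)` store). That matches the (1664, 400, 100, 1) strides of the
# mask buffer allocated in `call` below; the 64 extra elements per outer slice
# are padding the compiler inserts, presumably for alignment.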
# kernel path: runs/run_shard_9/inductor_cache/mx/cmx4kuzvicyoowiqum2e3cive2xt5em26squ7jb7xxxklrykvpnj.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.view]
# Source node to ATen node mapping:
# x_4 => view_14
# Graph fragment:
# %view_14 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_13, [64, 100]), kwargs = {})
triton_poi_fused_view_3 = async_compile.triton('triton_poi_fused_view_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 100
x1 = (xindex // 100)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (100*x1) + (400*((x1 % 4) // 4)) + (1600*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (100, 50), (50, 1))
assert_size_stride(primals_5, (100, ), (1, ))
assert_size_stride(primals_6, (1, 100), (100, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0); del buf0 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 3200, grid=grid(3200), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 3200, grid=grid(3200), stream=stream0)
del buf1
buf3 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (50, 100), (1, 50), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf3 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf8, 6400, grid=grid(6400), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.view]
triton_poi_fused_view_3.run(buf4, buf5, 6400, grid=grid(6400), stream=stream0)
del buf4
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf7)
del primals_7
return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, primals_6, buf8, primals_4, buf9, )
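# Editorial note: besides the network output (reinterpreted back to
# (4, 4, 4, 1)), `call` returns everything the backward pass will need: the
# flattened input, the two contiguized post-ReLU activations (buf2, buf5),
# the weights of the last two linear layers, and the saved `<= 0` masks
# (buf8, buf9) produced by the fused kernels above.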
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((50, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((100, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class SimpleNN(nn.Module):
def __init__(self, input_dim):
super(SimpleNN, self).__init__()
self.linear1 = nn.Linear(input_dim, 50)
self.relu = nn.ReLU(inplace=True)
self.linear2 = nn.Linear(50, 100)
self.out = nn.Linear(100, 1)
def forward(self, x):
x = self.linear1(x)
x = self.relu(x)
x = self.linear2(x)
x = self.relu(x)
x = self.out(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 50
x1 = xindex // 50
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 50 * x1 + 200 * (x1 % 4 // 4) + 800 * ((
4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x3 = xindex // 1600
x5 = xindex % 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x5 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 100
x1 = xindex // 100
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 100 * x1 + 400 * (x1 % 4 // 4) + 1600 *
((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (100, 50), (50, 1))
assert_size_stride(primals_5, (100,), (1,))
assert_size_stride(primals_6, (1, 100), (100, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1,
primals_2, buf9, 3200, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
triton_poi_fused_view_1[grid(3200)](buf1, buf2, 3200, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (50, 100), (1,
50), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf3
buf8 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(6400)](buf4,
primals_5, buf8, 6400, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
triton_poi_fused_view_3[grid(6400)](buf4, buf5, 6400, XBLOCK=256,
num_warps=4, num_stages=1)
del buf4
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(100, 1), (1, 100), 0), alpha=1, beta=1, out=buf7)
del primals_7
return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf5, primals_6, buf8, primals_4, buf9
class SimpleNNNew(nn.Module):
def __init__(self, input_dim):
super(SimpleNNNew, self).__init__()
self.linear1 = nn.Linear(input_dim, 50)
self.relu = nn.ReLU(inplace=True)
self.linear2 = nn.Linear(50, 100)
self.out = nn.Linear(100, 1)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.out.weight
primals_7 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| zhaofeng-shu33/Learning_From_Data_2019_Fall | SimpleNN | false | 13,175 | ["MIT"] | 0 | 3e5e1f834c8057817d2e9c3e3fc8d7880fa3a1bd | https://github.com/zhaofeng-shu33/Learning_From_Data_2019_Fall/tree/3e5e1f834c8057817d2e9c3e3fc8d7880fa3a1bd |
SimpleMLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/mi/cmi476zw6ohnah2zxegdqc6fvw4ui6ahcdpt576v245i6ehle2hj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# x => expm1, gt, mul, mul_2, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
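# Editorial note: with alpha = scale = 1.0 the branches above collapse to the
# textbook ELU. A hedged eager-mode sketch (the name is illustrative):
def _sketch_elu(x):
    # where(x > 0, x, expm1(x)) equals ELU(x) for alpha = 1
    return torch.where(x > 0, x, torch.expm1(x))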
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 1), (1, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (1, 1), (1, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (1, 1), (1, 1))
assert_size_stride(primals_9, (1, ), (1, ))
assert_size_stride(primals_10, (1, 1), (1, 1))
assert_size_stride(primals_11, (1, ), (1, ))
assert_size_stride(primals_12, (1, 1), (1, 1))
assert_size_stride(primals_13, (1, ), (1, ))
assert_size_stride(primals_14, (1, 1), (1, 1))
assert_size_stride(primals_15, (1, ), (1, ))
assert_size_stride(primals_16, (4, 1), (1, 1))
assert_size_stride(primals_17, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_elu_0.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 1), (1, 0), 0), primals_4, alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.elu]
triton_poi_fused_elu_0.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 1), (1, 0), 0), primals_6, alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.elu]
triton_poi_fused_elu_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf8, (64, 1), (1, 0), 0), primals_8, alpha=1, beta=1, out=buf10)
del primals_9
buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.elu]
triton_poi_fused_elu_0.run(buf10, buf11, 64, grid=grid(64), stream=stream0)
buf13 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 1), (1, 0), 0), primals_10, alpha=1, beta=1, out=buf13)
del primals_11
buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.elu]
triton_poi_fused_elu_0.run(buf13, buf14, 64, grid=grid(64), stream=stream0)
buf16 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf14, (64, 1), (1, 0), 0), primals_12, alpha=1, beta=1, out=buf16)
del primals_13
buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.elu]
triton_poi_fused_elu_0.run(buf16, buf17, 64, grid=grid(64), stream=stream0)
buf19 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, reinterpret_tensor(buf17, (64, 1), (1, 0), 0), primals_14, alpha=1, beta=1, out=buf19)
del primals_15
buf20 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.elu]
triton_poi_fused_elu_0.run(buf19, buf20, 64, grid=grid(64), stream=stream0)
buf21 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_17, reinterpret_tensor(buf20, (64, 1), (1, 0), 0), reinterpret_tensor(primals_16, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf21)
del primals_17
return (reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 1), (1, 1), 0), buf4, reinterpret_tensor(buf5, (64, 1), (1, 1), 0), buf7, reinterpret_tensor(buf8, (64, 1), (1, 1), 0), buf10, reinterpret_tensor(buf11, (64, 1), (1, 1), 0), buf13, reinterpret_tensor(buf14, (64, 1), (1, 1), 0), buf16, reinterpret_tensor(buf17, (64, 1), (1, 1), 0), buf19, reinterpret_tensor(buf20, (64, 1), (1, 1), 0), primals_16, primals_14, primals_12, primals_10, primals_8, primals_6, primals_4, )
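# Editorial note: because this trace was captured with neurons_per_layer = 1,
# every hidden activation is a 64-element column, so the single ELU kernel
# above is simply re-launched seven times between the addmm calls; no
# per-layer specialization is needed.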
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.optim
import torch.jit
import torch.nn as nn
class SimpleMLP(nn.Module):
def __init__(self, num_in_features, num_out_features, neurons_per_layer):
super(SimpleMLP, self).__init__()
self.act = nn.ELU()
self.l_in = nn.Linear(in_features=num_in_features, out_features=
neurons_per_layer)
self.l1 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l2 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l3 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l4 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l5 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l6 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l_out = nn.Linear(in_features=neurons_per_layer, out_features=
num_out_features)
torch.nn.init.xavier_normal_(self.l_in.weight)
torch.nn.init.zeros_(self.l_in.bias)
torch.nn.init.xavier_normal_(self.l1.weight)
torch.nn.init.zeros_(self.l1.bias)
torch.nn.init.xavier_normal_(self.l2.weight)
torch.nn.init.zeros_(self.l2.bias)
torch.nn.init.xavier_normal_(self.l3.weight)
torch.nn.init.zeros_(self.l3.bias)
torch.nn.init.xavier_normal_(self.l4.weight)
torch.nn.init.zeros_(self.l4.bias)
torch.nn.init.xavier_normal_(self.l5.weight)
torch.nn.init.zeros_(self.l5.bias)
torch.nn.init.xavier_normal_(self.l6.weight)
torch.nn.init.zeros_(self.l6.bias)
torch.nn.init.xavier_normal_(self.l_out.weight)
torch.nn.init.zeros_(self.l_out.bias)
def forward(self, x):
x = self.act(self.l_in(x))
x = self.act(self.l1(x))
x = self.act(self.l2(x))
x = self.act(self.l3(x))
x = self.act(self.l4(x))
x = self.act(self.l5(x))
x = self.act(self.l6(x))
x = self.l_out(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in_features': 4, 'num_out_features': 4,
'neurons_per_layer': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.optim
import torch.jit
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 1), (1, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (1, 1), (1, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1, 1), (1, 1))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (1, 1), (1, 1))
assert_size_stride(primals_11, (1,), (1,))
assert_size_stride(primals_12, (1, 1), (1, 1))
assert_size_stride(primals_13, (1,), (1,))
assert_size_stride(primals_14, (1, 1), (1, 1))
assert_size_stride(primals_15, (1,), (1,))
assert_size_stride(primals_16, (4, 1), (1, 1))
assert_size_stride(primals_17, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 1), (
1, 0), 0), primals_4, alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_elu_0[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 1), (
1, 0), 0), primals_6, alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_elu_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf8, (64, 1), (
1, 0), 0), primals_8, alpha=1, beta=1, out=buf10)
del primals_9
buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_elu_0[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 1),
(1, 0), 0), primals_10, alpha=1, beta=1, out=buf13)
del primals_11
buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_elu_0[grid(64)](buf13, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf14, (64, 1),
(1, 0), 0), primals_12, alpha=1, beta=1, out=buf16)
del primals_13
buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_elu_0[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf17, (64, 1),
(1, 0), 0), primals_14, alpha=1, beta=1, out=buf19)
del primals_15
buf20 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_elu_0[grid(64)](buf19, buf20, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_17, reinterpret_tensor(buf20, (64, 1),
(1, 0), 0), reinterpret_tensor(primals_16, (1, 4), (1, 1), 0),
alpha=1, beta=1, out=buf21)
del primals_17
return (reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1,
reinterpret_tensor(buf2, (64, 1), (1, 1), 0), buf4,
reinterpret_tensor(buf5, (64, 1), (1, 1), 0), buf7,
reinterpret_tensor(buf8, (64, 1), (1, 1), 0), buf10,
reinterpret_tensor(buf11, (64, 1), (1, 1), 0), buf13,
reinterpret_tensor(buf14, (64, 1), (1, 1), 0), buf16,
reinterpret_tensor(buf17, (64, 1), (1, 1), 0), buf19,
reinterpret_tensor(buf20, (64, 1), (1, 1), 0), primals_16,
primals_14, primals_12, primals_10, primals_8, primals_6, primals_4)
class SimpleMLPNew(nn.Module):
def __init__(self, num_in_features, num_out_features, neurons_per_layer):
super(SimpleMLPNew, self).__init__()
self.act = nn.ELU()
self.l_in = nn.Linear(in_features=num_in_features, out_features=
neurons_per_layer)
self.l1 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l2 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l3 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l4 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l5 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l6 = nn.Linear(in_features=neurons_per_layer, out_features=
neurons_per_layer)
self.l_out = nn.Linear(in_features=neurons_per_layer, out_features=
num_out_features)
torch.nn.init.xavier_normal_(self.l_in.weight)
torch.nn.init.zeros_(self.l_in.bias)
torch.nn.init.xavier_normal_(self.l1.weight)
torch.nn.init.zeros_(self.l1.bias)
torch.nn.init.xavier_normal_(self.l2.weight)
torch.nn.init.zeros_(self.l2.bias)
torch.nn.init.xavier_normal_(self.l3.weight)
torch.nn.init.zeros_(self.l3.bias)
torch.nn.init.xavier_normal_(self.l4.weight)
torch.nn.init.zeros_(self.l4.bias)
torch.nn.init.xavier_normal_(self.l5.weight)
torch.nn.init.zeros_(self.l5.bias)
torch.nn.init.xavier_normal_(self.l6.weight)
torch.nn.init.zeros_(self.l6.bias)
torch.nn.init.xavier_normal_(self.l_out.weight)
torch.nn.init.zeros_(self.l_out.bias)
def forward(self, input_0):
primals_1 = self.l_in.weight
primals_2 = self.l_in.bias
primals_4 = self.l1.weight
primals_5 = self.l1.bias
primals_6 = self.l2.weight
primals_7 = self.l2.bias
primals_8 = self.l3.weight
primals_9 = self.l3.bias
primals_10 = self.l4.weight
primals_11 = self.l4.bias
primals_12 = self.l5.weight
primals_13 = self.l5.bias
primals_14 = self.l6.weight
primals_15 = self.l6.bias
primals_16 = self.l_out.weight
primals_17 = self.l_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
| zhaofeng-shu33/deep_euler_tests | SimpleMLP | false | 13,176 | ["MIT"] | 0 | a3d0961af679d490b0c58873ee0726234122bc7a | https://github.com/zhaofeng-shu33/deep_euler_tests/tree/a3d0961af679d490b0c58873ee0726234122bc7a |
PSNR | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/np/cnp5peaudws3oiaugbrawibehsddo2ovc64uhbacz4txujiu7hni.py
# Topologically Sorted Source Nodes: [mse, add, log10, mul], Original ATen: [aten.mse_loss, aten.add, aten.log10, aten.mul]
# Source node to ATen node mapping:
# add => add
# log10 => log10
# mse => mean, pow_1, sub
# mul => mul
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-12), kwargs = {})
# %log10 : [num_users=1] = call_function[target=torch.ops.aten.log10.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log10, -10), kwargs = {})
triton_per_fused_add_log10_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_log10_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log10_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1e-12
tmp10 = tmp8 + tmp9
tmp11 = libdevice.log10(tmp10)
tmp12 = -10.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
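# Editorial note: the persistent reduction above computes the whole PSNR in
# one pass. A hedged eager-mode sketch with illustrative names:
def _sketch_psnr(out, ref, eps=1e-12):
    mse = torch.mean((out - ref) ** 2)     # fused sub / square / mean
    return -10.0 * torch.log10(mse + eps)  # fused add / log10 / mul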
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mse, add, log10, mul], Original ATen: [aten.mse_loss, aten.add, aten.log10, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_log10_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch as th
class PSNR(th.nn.Module):
def __init__(self):
super(PSNR, self).__init__()
self.mse = th.nn.MSELoss()
def forward(self, out, ref):
mse = self.mse(out, ref)
return -10 * th.log10(mse + 1e-12)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
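# Editorial note, worked example of the formula above: mse = 1e-2 gives
# -10 * log10(1e-2 + 1e-12) ~= 20 dB, while identical tensors (mse = 0)
# saturate at -10 * log10(1e-12) = 120 dB thanks to the epsilon.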
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1e-12
tmp10 = tmp8 + tmp9
tmp11 = libdevice.log10(tmp10)
tmp12 = -10.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log10_mse_loss_mul_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class PSNRNew(th.nn.Module):
def __init__(self):
super(PSNRNew, self).__init__()
self.mse = th.nn.MSELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zsinsense/demosaicnet | PSNR | false | 13,177 | ["MIT"] | 0 | bbe8151cab86dbe46b76806cf9ec353994b389ff | https://github.com/zsinsense/demosaicnet/tree/bbe8151cab86dbe46b76806cf9ec353994b389ff |
APLoss_dist | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/k2/ck266gaihvgoxly4bowauykafsvsnnafl6krevx6lg3fdryk46d2.py
# Topologically Sorted Source Nodes: [mul, sub, sqrt, d], Original ATen: [aten.mul, aten.rsub, aten.sqrt]
# Source node to ATen node mapping:
# d => sub_1
# mul => mul
# sqrt => sqrt
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.001, %mul), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sub,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sqrt), kwargs = {})
triton_poi_fused_mul_rsub_sqrt_0 = async_compile.triton('triton_poi_fused_mul_rsub_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_rsub_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 2.001
tmp4 = tmp3 - tmp2
tmp5 = libdevice.sqrt(tmp4)
tmp6 = 1.0
tmp7 = tmp6 - tmp5
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
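# Editorial note: assuming the input holds cosine similarities s in [-1, 1]
# (as in APLoss_dist), the kernel above maps them to the distance-like score
# d = 1 - sqrt(2.001 - 2 * s). For unit vectors the squared Euclidean distance
# is 2 - 2 * s, so this is essentially 1 - ||x - y||, with the 0.001 cushion
# keeping the sqrt argument strictly positive at s = 1. A hedged eager sketch:
def _sketch_sim_to_dist(sim):
    return 1.0 - torch.sqrt(2.001 - 2.0 * sim)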
# kernel path: runs/run_shard_9/inductor_cache/nd/cndxcqxuvs23ysbpwbbandbdmscpjru5cc77ujvj233azqylzu24.py
# Topologically Sorted Source Nodes: [min_1, q_1, mul_1, rec, cumsum, nbs, cumsum_1, add, prec, sum_3, rec_1, mul_2, ap], Original ATen: [aten.minimum, aten.clamp, aten.mul, aten.sum, aten.cumsum, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# ap => sum_4
# cumsum => cumsum
# cumsum_1 => cumsum_1
# min_1 => minimum
# mul_1 => mul_1
# mul_2 => mul_2
# nbs => sum_1
# prec => div
# q_1 => clamp_min
# rec => sum_2
# rec_1 => div_1
# sum_3 => sum_3
# Graph fragment:
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%slice_2, %slice_4), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%minimum, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min, %view), kwargs = {})
# %sum_2 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {})
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sum_2, -1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %cumsum_1 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sum_1, -1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cumsum_1, 1e-16), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%cumsum, %add), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_2, [-1]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %unsqueeze_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {})
triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1 = async_compile.triton('triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[4, 32],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 14, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r1), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (100 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr1 + (25 + r1), rmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (1 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tl.load(in_ptr0 + (101 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (2 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr0 + (102 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (3 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp31 = tl.load(in_ptr0 + (103 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.minimum(tmp2, tmp5)
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = tmp8 * tmp9
tmp12 = tmp11 + tmp1
tmp14 = tmp13 + tmp4
tmp15 = triton_helpers.minimum(tmp12, tmp14)
tmp16 = triton_helpers.maximum(tmp15, tmp7)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 + tmp18
tmp21 = tmp20 + tmp1
tmp23 = tmp22 + tmp4
tmp24 = triton_helpers.minimum(tmp21, tmp23)
tmp25 = triton_helpers.maximum(tmp24, tmp7)
tmp27 = tmp25 * tmp26
tmp28 = tmp19 + tmp27
tmp30 = tmp29 + tmp1
tmp32 = tmp31 + tmp4
tmp33 = triton_helpers.minimum(tmp30, tmp32)
tmp34 = triton_helpers.maximum(tmp33, tmp7)
tmp36 = tmp34 * tmp35
tmp37 = tmp28 + tmp36
tmp38 = tmp8 + tmp16
tmp39 = tmp38 + tmp25
tmp40 = tmp39 + tmp34
tmp41 = tmp40.to(tl.float32)
tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK])
tmp43, = tl.associative_scan((tmp42,), 1, _triton_helper_fn_add0)
tmp44 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp46 = tl.where(rmask & xmask, tmp44, 0)
tmp47 = tl.sum(tmp46, 1)[:, None]
tmp48 = tmp37.to(tl.float32)
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp50, = tl.associative_scan((tmp49,), 1, _triton_helper_fn_add0)
tmp51 = 1e-16
tmp52 = tmp43 + tmp51
tmp53 = tmp50 / tmp52
tmp54 = tmp37 / tmp47
tmp55 = tmp53 * tmp54
tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK])
tmp58 = tl.where(rmask & xmask, tmp56, 0)
tmp59 = tl.sum(tmp58, 1)[:, None]
tl.store(in_out_ptr1 + (x0), tmp59, xmask)
''', device_str='cuda')
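# Editorial note: the fused reduction above is a quantized average-precision
# computation, one output per query row. A hedged eager-mode sketch with
# illustrative names (q: (N, Q, M) clamped soft bin assignments from the
# quantizer conv, label: (N, M) binary relevance):
def _sketch_quantized_ap(q, label, eps=1e-16):
    rec = (q * label.unsqueeze(1)).sum(-1)            # positives captured per bin
    nbs = q.sum(-1)                                   # total mass per bin
    prec = rec.cumsum(-1) / (nbs.cumsum(-1) + eps)    # running precision over bins
    return (prec * rec / rec.sum(-1, keepdim=True)).sum(-1)  # AP per row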
# kernel path: runs/run_shard_9/inductor_cache/u2/cu2t6bkeledk75diwog42b5g2ekxve4rpnk5loywlit7ulhimxzl.py
# Topologically Sorted Source Nodes: [mean, sub_2], Original ATen: [aten.mean, aten.rsub]
# Source node to ATen node mapping:
# mean => mean
# sub_2 => sub_2
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mean), kwargs = {})
triton_per_fused_mean_rsub_2 = async_compile.triton('triton_per_fused_mean_rsub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_rsub_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 4.0
tmp5 = tmp3 / tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp7, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (50, 1, 1), (1, 1, 1))
assert_size_stride(arg3_1, (50, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub, sqrt, d], Original ATen: [aten.mul, aten.rsub, aten.sqrt]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_rsub_sqrt_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 1, 4), (4, 0, 1), 0), arg2_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 50, 4), (200, 4, 1))
del arg2_1
del buf0
buf6 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [min_1, q_1, mul_1, rec, cumsum, nbs, cumsum_1, add, prec, sum_3, rec_1, mul_2, ap], Original ATen: [aten.minimum, aten.clamp, aten.mul, aten.sum, aten.cumsum, aten.add, aten.div]
triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1.run(buf7, buf1, arg3_1, arg1_1, 4, 25, grid=grid(4), stream=stream0)
del arg1_1
del arg3_1
del buf1
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [mean, sub_2], Original ATen: [aten.mean, aten.rsub]
triton_per_fused_mean_rsub_2.run(buf9, buf7, 1, 4, grid=grid(1), stream=stream0)
del buf7
return (buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((50, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
def sim_to_dist(scores):
return 1 - torch.sqrt(2.001 - 2 * scores)
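# Editorial note: for unit-norm embeddings, ||u - v||^2 = 2 - 2 * <u, v>, so
# sqrt(2.001 - 2 * scores) approximates the Euclidean distance between the
# underlying vectors; the extra 0.001 keeps sqrt() differentiable at scores = 1.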
class APLoss(nn.Module):
""" Differentiable AP loss, through quantization. From the paper:
Learning with Average Precision: Training Image Retrieval with a Listwise Loss
Jerome Revaud, Jon Almazan, Rafael Sampaio de Rezende, Cesar de Souza
https://arxiv.org/abs/1906.07589
Input: (N, M) values in [min, max]
label: (N, M) values in {0, 1}
Returns: 1 - mAP (mean AP for each n in {1..N})
Note: typically, this is what you want to minimize
"""
def __init__(self, nq=25, min=0, max=1):
nn.Module.__init__(self)
assert isinstance(nq, int) and 2 <= nq <= 100
self.nq = nq
self.min = min
self.max = max
gap = max - min
assert gap > 0
self.quantizer = q = nn.Conv1d(1, 2 * nq, kernel_size=1, bias=True)
q.weight = nn.Parameter(q.weight.detach(), requires_grad=False)
q.bias = nn.Parameter(q.bias.detach(), requires_grad=False)
a = (nq - 1) / gap
q.weight[:nq] = -a
q.bias[:nq] = torch.from_numpy(a * min + np.arange(nq, 0, -1))
q.weight[nq:] = a
q.bias[nq:] = torch.from_numpy(np.arange(2 - nq, 2, 1) - a * min)
q.weight[0] = q.weight[-1] = 0
q.bias[0] = q.bias[-1] = 1
def forward(self, x, label, qw=None, ret='1-mAP'):
assert x.shape == label.shape
N, M = x.shape
q = self.quantizer(x.unsqueeze(1))
q = torch.min(q[:, :self.nq], q[:, self.nq:]).clamp(min=0)
nbs = q.sum(dim=-1)
rec = (q * label.view(N, 1, M).float()).sum(dim=-1)
prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1))
rec /= rec.sum(dim=-1).unsqueeze(1)
ap = (prec * rec).sum(dim=-1)
if ret == '1-mAP':
if qw is not None:
ap *= qw
return 1 - ap.mean()
elif ret == 'AP':
assert qw is None
return ap
else:
raise ValueError('Bad return type for APLoss(): %s' % str(ret))
def measures(self, x, gt, loss=None):
if loss is None:
loss = self.forward(x, gt)
return {'loss_ap': float(loss)}
class APLoss_dist(APLoss):
def forward(self, x, label, **kw):
d = sim_to_dist(x)
return APLoss.forward(self, d, label, **kw)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
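# Illustrative usage sketch (editorial addition, not from the original repo).
# The quantizer's 1x1 Conv1d pairs a descending ramp (rows [0, nq)) with an
# ascending ramp (rows [nq, 2*nq)); their clamped pointwise minimum yields nq
# triangular membership functions over [min, max]. Shapes below are assumptions.
def example_aploss_usage():
    scores = torch.rand(4, 4)     # (N, M) similarity scores in [0, 1]
    labels = torch.eye(4)         # (N, M) binary relevance, one positive per row
    criterion = APLoss(nq=25, min=0, max=1)
    loss = criterion(scores, labels)   # scalar tensor: 1 - mAP
    return loss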
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_rsub_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 2.001
tmp4 = tmp3 - tmp2
tmp5 = libdevice.sqrt(tmp4)
tmp6 = 1.0
tmp7 = tmp6 - tmp5
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
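# The helper above is the binary combiner handed to tl.associative_scan below,
# turning each scan into an inclusive cumulative sum (cumsum) along the r axis.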
@triton.jit
def triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1(in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4 * r1 + 200 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0
)
tmp3 = tl.load(in_ptr0 + (100 + 4 * r1 + 200 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr1 + (25 + r1), rmask, eviction_policy='evict_last',
other=0.0)
tmp9 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (1 + 4 * r1 + 200 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl.load(in_ptr0 + (101 + 4 * r1 + 200 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (2 + 4 * r1 + 200 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr0 + (102 + 4 * r1 + 200 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp26 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr0 + (3 + 4 * r1 + 200 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp31 = tl.load(in_ptr0 + (103 + 4 * r1 + 200 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.minimum(tmp2, tmp5)
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = tmp8 * tmp9
tmp12 = tmp11 + tmp1
tmp14 = tmp13 + tmp4
tmp15 = triton_helpers.minimum(tmp12, tmp14)
tmp16 = triton_helpers.maximum(tmp15, tmp7)
tmp18 = tmp16 * tmp17
tmp19 = tmp10 + tmp18
tmp21 = tmp20 + tmp1
tmp23 = tmp22 + tmp4
tmp24 = triton_helpers.minimum(tmp21, tmp23)
tmp25 = triton_helpers.maximum(tmp24, tmp7)
tmp27 = tmp25 * tmp26
tmp28 = tmp19 + tmp27
tmp30 = tmp29 + tmp1
tmp32 = tmp31 + tmp4
tmp33 = triton_helpers.minimum(tmp30, tmp32)
tmp34 = triton_helpers.maximum(tmp33, tmp7)
tmp36 = tmp34 * tmp35
tmp37 = tmp28 + tmp36
tmp38 = tmp8 + tmp16
tmp39 = tmp38 + tmp25
tmp40 = tmp39 + tmp34
tmp41 = tmp40.to(tl.float32)
tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK])
tmp43, = tl.associative_scan((tmp42,), 1, _triton_helper_fn_add0)
tmp44 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp46 = tl.where(rmask & xmask, tmp44, 0)
tmp47 = tl.sum(tmp46, 1)[:, None]
tmp48 = tmp37.to(tl.float32)
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp50, = tl.associative_scan((tmp49,), 1, _triton_helper_fn_add0)
tmp51 = 1e-16
tmp52 = tmp43 + tmp51
tmp53 = tmp50 / tmp52
tmp54 = tmp37 / tmp47
tmp55 = tmp53 * tmp54
tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK])
tmp58 = tl.where(rmask & xmask, tmp56, 0)
tmp59 = tl.sum(tmp58, 1)[:, None]
tl.store(in_out_ptr1 + x0, tmp59, xmask)
@triton.jit
def triton_per_fused_mean_rsub_2(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 4.0
tmp5 = tmp3 / tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
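# This kernel collapses the four per-query AP values into the scalar loss
# 1 - mean(AP); masks are dropped because the whole [1, 4] tile is in bounds.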
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (50, 1, 1), (1, 1, 1))
assert_size_stride(arg3_1, (50,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_rsub_sqrt_0[grid(16)](arg0_1, buf0, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del arg0_1
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 1, 4
), (4, 0, 1), 0), arg2_1, stride=(1,), padding=(0,), dilation=(
1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 50, 4), (200, 4, 1))
del arg2_1
del buf0
buf6 = empty_strided_cuda((4,), (1,), torch.float32)
buf7 = buf6
del buf6
triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1[grid(4)](buf7,
buf1, arg3_1, arg1_1, 4, 25, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del arg3_1
del buf1
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8
del buf8
triton_per_fused_mean_rsub_2[grid(1)](buf9, buf7, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf7
return buf9,
def sim_to_dist(scores):
return 1 - torch.sqrt(2.001 - 2 * scores)
class APLoss(nn.Module):
""" Differentiable AP loss, through quantization. From the paper:
Learning with Average Precision: Training Image Retrieval with a Listwise Loss
Jerome Revaud, Jon Almazan, Rafael Sampaio de Rezende, Cesar de Souza
https://arxiv.org/abs/1906.07589
Input: (N, M) values in [min, max]
label: (N, M) values in {0, 1}
Returns: 1 - mAP (mean AP for each n in {1..N})
Note: typically, this is what you want to minimize
"""
def __init__(self, nq=25, min=0, max=1):
nn.Module.__init__(self)
assert isinstance(nq, int) and 2 <= nq <= 100
self.nq = nq
self.min = min
self.max = max
gap = max - min
assert gap > 0
self.quantizer = q = nn.Conv1d(1, 2 * nq, kernel_size=1, bias=True)
q.weight = nn.Parameter(q.weight.detach(), requires_grad=False)
q.bias = nn.Parameter(q.bias.detach(), requires_grad=False)
a = (nq - 1) / gap
q.weight[:nq] = -a
q.bias[:nq] = torch.from_numpy(a * min + np.arange(nq, 0, -1))
q.weight[nq:] = a
q.bias[nq:] = torch.from_numpy(np.arange(2 - nq, 2, 1) - a * min)
q.weight[0] = q.weight[-1] = 0
q.bias[0] = q.bias[-1] = 1
def forward(self, x, label, qw=None, ret='1-mAP'):
assert x.shape == label.shape
N, M = x.shape
q = self.quantizer(x.unsqueeze(1))
q = torch.min(q[:, :self.nq], q[:, self.nq:]).clamp(min=0)
nbs = q.sum(dim=-1)
rec = (q * label.view(N, 1, M).float()).sum(dim=-1)
prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1))
rec /= rec.sum(dim=-1).unsqueeze(1)
ap = (prec * rec).sum(dim=-1)
if ret == '1-mAP':
if qw is not None:
ap *= qw
return 1 - ap.mean()
elif ret == 'AP':
assert qw is None
return ap
else:
raise ValueError('Bad return type for APLoss(): %s' % str(ret))
def measures(self, x, gt, loss=None):
if loss is None:
loss = self.forward(x, gt)
return {'loss_ap': float(loss)}
class APLoss_distNew(APLoss):
def forward(self, input_0, input_1):
arg2_1 = self.quantizer.weight
arg3_1 = self.quantizer.bias
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
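# Hedged parity sketch (editorial addition; assumes a CUDA device): the fused
# `call` bakes sim_to_dist into its first kernel, so the compiled forward
# should track the eager APLoss applied to sim_to_dist(x), up to float error.
def example_compiled_vs_eager():
    x = torch.rand(4, 4, device='cuda')
    labels = torch.eye(4, device='cuda')
    eager = APLoss().cuda()(sim_to_dist(x), labels)
    fused = APLoss_distNew().cuda()(x, labels)
    return eager, fused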
| zhangxue123/deep-image-retrieval | APLoss_dist | false | 13,178 | [
"BSD-3-Clause"
]
| 0 | ac188856fa5a034aed3f7ed3fb617d580da44462 | https://github.com/zhangxue123/deep-image-retrieval/tree/ac188856fa5a034aed3f7ed3fb617d580da44462 |
ClusterAssignment | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/qr/cqramfwzchcxgkcint325vw2hnfsaipzt53x3qxrfqbhcxae7edr.py
# Topologically Sorted Source Nodes: [sub, pow_1, norm_squared, truediv, add, numerator, numerator_1, sum_2, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add, aten.reciprocal, aten.mul]
# Source node to ATen node mapping:
# add => add
# norm_squared => sum_1
# numerator => mul, reciprocal
# numerator_1 => pow_2
# pow_1 => pow_1
# sub => sub
# sum_2 => sum_2
# truediv => div
# truediv_2 => div_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %primals_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1.0), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 1.0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, %sum_2), kwargs = {})
triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 - tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp9 = tmp8 - tmp1
tmp10 = tmp9 * tmp9
tmp11 = tmp7 + tmp10
tmp13 = tmp12 - tmp1
tmp14 = tmp13 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = 1.0
tmp17 = tmp15 * tmp16
tmp18 = tmp17 + tmp16
tmp19 = tl.full([1], 1, tl.int32)
tmp20 = tmp19 / tmp18
tmp21 = tmp20 * tmp16
tmp22 = tmp21 / tmp21
tl.store(in_out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, norm_squared, truediv, add, numerator, numerator_1, sum_2, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add, aten.reciprocal, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0.run(buf1, primals_1, primals_2, 64, grid=grid(64), stream=stream0)
return (buf1, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import Parameter
from typing import Optional
class ClusterAssignment(nn.Module):
def __init__(self, cluster_number: 'int', embedding_dimension: 'int',
alpha: 'float'=1.0, cluster_centers: 'Optional[torch.Tensor]'=None
) ->None:
"""
Module to handle the soft assignment; for a description see section 3.1.1 in Xie/Girshick/Farhadi,
where the Student's t-distribution is used to measure similarity between each feature vector and
each cluster centroid.
:param cluster_number: number of clusters
:param embedding_dimension: embedding dimension of feature vectors
:param alpha: parameter representing the degrees of freedom in the t-distribution, default 1.0
:param cluster_centers: cluster centers to initialise; if None, use Xavier uniform
"""
super(ClusterAssignment, self).__init__()
self.embedding_dimension = embedding_dimension
self.cluster_number = cluster_number
self.alpha = alpha
if cluster_centers is None:
initial_cluster_centers = torch.zeros(self.cluster_number, self
.embedding_dimension, dtype=torch.float)
nn.init.xavier_uniform_(initial_cluster_centers)
else:
initial_cluster_centers = cluster_centers
self.cluster_centers = Parameter(initial_cluster_centers)
def forward(self, batch: 'torch.Tensor') ->torch.Tensor:
"""
Compute the soft assignment for a batch of feature vectors, returning a batch of assignments
for each cluster.
:param batch: FloatTensor of [batch size, embedding dimension]
:return: FloatTensor [batch size, number of clusters]
"""
norm_squared = torch.sum((batch.unsqueeze(1) - self.cluster_centers
) ** 2, 2)
numerator = 1.0 / (1.0 + norm_squared / self.alpha)
power = float(self.alpha + 1) / 2
numerator = numerator ** power
return numerator / torch.sum(numerator, dim=1, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'cluster_number': 4, 'embedding_dimension': 4}]
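# Illustrative sketch (editorial addition): with alpha = 1 the assignment is
# a normalized Student's t-kernel, so each row of the output sums to 1.
def example_soft_assignment():
    assigner = ClusterAssignment(cluster_number=4, embedding_dimension=4)
    batch = torch.rand(8, 4)    # (batch size, embedding dimension)
    q = assigner(batch)         # (batch size, cluster number)
    assert torch.allclose(q.sum(dim=1), torch.ones(8), atol=1e-6)
    return q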
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import Parameter
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 - tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp9 = tmp8 - tmp1
tmp10 = tmp9 * tmp9
tmp11 = tmp7 + tmp10
tmp13 = tmp12 - tmp1
tmp14 = tmp13 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = 1.0
tmp17 = tmp15 * tmp16
tmp18 = tmp17 + tmp16
tmp19 = tl.full([1], 1, tl.int32)
tmp20 = tmp19 / tmp18
tmp21 = tmp20 * tmp16
tmp22 = tmp21 / tmp21
tl.store(in_out_ptr0 + x2, tmp22, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0[grid(64)](buf1,
primals_1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf1, primals_1, primals_2
class ClusterAssignmentNew(nn.Module):
def __init__(self, cluster_number: 'int', embedding_dimension: 'int',
alpha: 'float'=1.0, cluster_centers: 'Optional[torch.Tensor]'=None
) ->None:
"""
Module to handle the soft assignment; for a description see section 3.1.1 in Xie/Girshick/Farhadi,
where the Student's t-distribution is used to measure similarity between each feature vector and
each cluster centroid.
:param cluster_number: number of clusters
:param embedding_dimension: embedding dimension of feature vectors
:param alpha: parameter representing the degrees of freedom in the t-distribution, default 1.0
:param cluster_centers: cluster centers to initialise; if None, use Xavier uniform
"""
super(ClusterAssignmentNew, self).__init__()
self.embedding_dimension = embedding_dimension
self.cluster_number = cluster_number
self.alpha = alpha
if cluster_centers is None:
initial_cluster_centers = torch.zeros(self.cluster_number, self
.embedding_dimension, dtype=torch.float)
nn.init.xavier_uniform_(initial_cluster_centers)
else:
initial_cluster_centers = cluster_centers
self.cluster_centers = Parameter(initial_cluster_centers)
def forward(self, input_0):
primals_2 = self.cluster_centers
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
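# Hedged sketch (editorial addition; assumes CUDA): the Triton path is
# specialized for the (4, 4, 4, 4) input shape asserted in `call` above.
def example_cluster_compiled():
    module = ClusterAssignmentNew(cluster_number=4, embedding_dimension=4).cuda()
    batch = torch.rand(4, 4, 4, 4, device='cuda')
    return module(batch)    # returns a (4, 1, 4, 4) tensor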
| zhyhan/pt-dec | ClusterAssignment | false | 13,179 | [
"MIT"
]
| 0 | 52aef59e508c8e7ffdde0fd7bea84570a7571b2a | https://github.com/zhyhan/pt-dec/tree/52aef59e508c8e7ffdde0fd7bea84570a7571b2a |
Similarity | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/pt/cptoooyd27oydfjdutzflkkwudracp4vhtyyiielinzms2jqrkrt.py
# Topologically Sorted Source Nodes: [vec_dist], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# vec_dist => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %abs_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr0 + ((4*x1) + ((-4) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = -tmp14
tmp16 = tmp13 + tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp9, tmp19)
tl.store(out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/x7/cx7xziu4lpr42gzh3hblzhyhhr2agimvsluvyrub77hqbwauajw5.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# out => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_4), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/gd/cgdq755g3clp3t5icrbudwx4ir4xygtoz6ug4jo2euegtyg5mdnp.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# out_1 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/j5/cj5hwj7ockkcleq56wmrpwxavcu7lllqodtnsxnd6sbzznn7lu6j.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# out_1 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_3 = async_compile.triton('triton_poi_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [vec_dist], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_3.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
del buf4
return (buf5, buf0, buf2, buf5, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Similarity(nn.Module):
def __init__(self, cuda, mem_dim, hidden_dim, num_classes):
super(Similarity, self).__init__()
self.cudaFlag = cuda
self.mem_dim = mem_dim
self.hidden_dim = hidden_dim
self.num_classes = num_classes
self.wh = nn.Linear(2 * self.mem_dim, self.hidden_dim)
self.wp = nn.Linear(self.hidden_dim, self.num_classes)
def forward(self, lvec, rvec):
mult_dist = F.torch.mul(lvec, rvec)
abs_dist = F.torch.abs(F.torch.add(lvec, -rvec))
vec_dist = F.torch.cat((mult_dist, abs_dist), 1)
out = F.sigmoid(self.wh(vec_dist))
out = F.log_softmax(self.wp(out))
return out
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'cuda': False, 'mem_dim': 4, 'hidden_dim': 4,
'num_classes': 4}]
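# Illustrative usage sketch (editorial addition): the module consumes two
# batches of sentence vectors and emits per-class log-probabilities.
def example_similarity():
    model = Similarity(cuda=False, mem_dim=4, hidden_dim=4, num_classes=4)
    lvec, rvec = torch.rand(4, 4), torch.rand(4, 4)
    log_probs = model(lvec, rvec)       # (batch, num_classes)
    return log_probs.exp().sum(dim=1)   # each entry ~= 1.0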
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr0 + (4 * x1 + (-4 + x0)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = -tmp14
tmp16 = tmp13 + tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp9, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
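# Kernels 2 and 3 together implement a numerically stable log-softmax over
# each row of 4 logits: subtract the row max, then subtract log-sum-exp.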
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_2, primals_1, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_sigmoid_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__log_softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf4
return buf5, buf0, buf2, buf5, primals_5
class SimilarityNew(nn.Module):
def __init__(self, cuda, mem_dim, hidden_dim, num_classes):
super(SimilarityNew, self).__init__()
self.cudaFlag = cuda
self.mem_dim = mem_dim
self.hidden_dim = hidden_dim
self.num_classes = num_classes
self.wh = nn.Linear(2 * self.mem_dim, self.hidden_dim)
self.wp = nn.Linear(self.hidden_dim, self.num_classes)
def forward(self, input_0, input_1):
primals_3 = self.wh.weight
primals_4 = self.wh.bias
primals_1 = self.wp.weight
primals_6 = self.wp.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
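# Hedged parity sketch (editorial addition; assumes CUDA): SimilarityNew
# routes the eager module's weights through the fused kernels, so it should
# agree with Similarity up to floating-point error on (4, 4) inputs.
def example_similarity_compiled():
    model = SimilarityNew(cuda=True, mem_dim=4, hidden_dim=4, num_classes=4).cuda()
    lvec = torch.rand(4, 4, device='cuda')
    rvec = torch.rand(4, 4, device='cuda')
    return model(lvec, rvec)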
| zhu-y11/multilingual_treelstm | Similarity | false | 13,180 | [
"MIT"
]
| 0 | 39c211f3c03db733f776aa8fe73cd615aaa47465 | https://github.com/zhu-y11/multilingual_treelstm/tree/39c211f3c03db733f776aa8fe73cd615aaa47465 |
NonLocalBlock2D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/p6/cp6eigwdwvuwkw7yu23b3vdln77bh54i4fjrvzzov2pds53nlqfx.py
# Topologically Sorted Source Nodes: [f_div_C], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# f_div_C => amax, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (256*x1)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/d7/cd7apysgloqtxrxqx7dlrx4zy3cpnbngbjn5huj2s4wjwnfthvnl.py
# Topologically Sorted Source Nodes: [f_div_C], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# f_div_C => div, exp, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 256)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/b2/cb25ngvlgfvwhmsm4qqvqq5lmve3fy4bkcembhrej27i4rplli36.py
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# y_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
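# Reference sketch added for illustration only: this kernel materializes the
# permuted attention output as a contiguous buffer so it can later be viewed as
# a (B, C, H, W) tensor for the following 1x1 convolution. A hypothetical eager
# equivalent of the fused transpose-copy, assuming a (B, HW, C) input:
def _clone_reference(y):
    return y.permute(0, 2, 1).contiguous()  # (B, HW, C) -> dense (B, C, HW)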
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tl.store(out_ptr0 + (x2 + (16*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/fj/cfjx4la4yfm6lei3rszxozl4x2jpdszxjumi7uslqcoqfnjq6dz4.py
# Topologically Sorted Source Nodes: [W_y, z], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# W_y => convolution_3
# z => add
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_9, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_3, %primals_1), kwargs = {})
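# Reference sketch added for illustration only: the kernel below fuses the bias
# add of the W convolution with the residual connection z = W_y + x, writing the
# output in a single pass. A hypothetical eager equivalent, assuming a
# (B, C, H, W) convolution output and a (C,) bias:
def _bias_residual_reference(conv_out, bias, x):
    return conv_out + bias.view(1, -1, 1, 1) + x  # broadcast bias, then residual add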
triton_poi_fused_add_convolution_4 = async_compile.triton('triton_poi_fused_add_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf4, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf5 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [f], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 1, 16), (16, 64, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 16), (16, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [f_div_C], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf5, buf6, buf7, 64, 16, grid=grid(64), stream=stream0)
buf8 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [f_div_C], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf8, buf6, buf7, 1024, grid=grid(1024), stream=stream0)
del buf6
del buf7
buf9 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf9, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf10 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 16, 4), (64, 1, 16), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf10, buf11, 16, 16, grid=grid(16, 16), stream=stream0)
del buf10
# Topologically Sorted Source Nodes: [W_y], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [W_y, z], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_4.run(buf13, primals_9, primals_1, 256, grid=grid(256), stream=stream0)
del primals_9
return (buf13, primals_1, primals_2, primals_4, primals_6, primals_8, buf8, reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf9, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo
class NonLocalBlock2D(nn.Module):
def __init__(self, in_channels, inter_channels):
super(NonLocalBlock2D, self).__init__()
self.in_channels = in_channels
self.inter_channels = inter_channels
self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self.
inter_channels, kernel_size=1, stride=1, padding=0)
self.W = nn.Conv2d(in_channels=self.inter_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
        nn.init.constant_(self.W.weight, 0)  # in-place init; nn.init.constant is deprecated
        nn.init.constant_(self.W.bias, 0)
self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.inter_channels, kernel_size=1, stride=1, padding=0)
self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.inter_channels, kernel_size=1, stride=1, padding=0)
def forward(self, x):
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = torch.matmul(theta_x, phi_x)
f_div_C = F.softmax(f, dim=1)
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
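# Minimal usage sketch (illustrative only; `_example_usage` is not part of the
# original module). The block is shape-preserving, so it can be inserted between
# any two layers that exchange (B, C, H, W) feature maps.
def _example_usage():
    block = NonLocalBlock2D(in_channels=4, inter_channels=4)
    x = torch.rand(4, 4, 4, 4)
    z = block(x)
    assert z.shape == x.shape  # residual output keeps the input shape
    return z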
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'inter_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 256 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 256
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x2 + 16 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_convolution_0[grid(256)](buf4, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16),
0), reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 1, 16), (16, 64, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 16), (16, 64, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf5, buf6, buf7, 64, 16,
XBLOCK=32, num_warps=4, num_stages=1)
buf8 = buf5
del buf5
triton_poi_fused__softmax_2[grid(1024)](buf8, buf6, buf7, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del buf6
del buf7
buf9 = buf0
del buf0
triton_poi_fused_convolution_0[grid(256)](buf9, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf10 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 16, 4), (64,
1, 16), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 16)](buf10, buf11, 16, 16, XBLOCK
=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf10
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 4,
4, 4), (64, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = buf12
del buf12
triton_poi_fused_add_convolution_4[grid(256)](buf13, primals_9,
primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
return (buf13, primals_1, primals_2, primals_4, primals_6, primals_8,
buf8, reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0),
reinterpret_tensor(buf9, (4, 4, 16), (64, 16, 1), 0),
reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0),
reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0))
class NonLocalBlock2DNew(nn.Module):
def __init__(self, in_channels, inter_channels):
super(NonLocalBlock2DNew, self).__init__()
self.in_channels = in_channels
self.inter_channels = inter_channels
self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self.
inter_channels, kernel_size=1, stride=1, padding=0)
self.W = nn.Conv2d(in_channels=self.inter_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
        nn.init.constant_(self.W.weight, 0)  # in-place init; nn.init.constant is deprecated
        nn.init.constant_(self.W.bias, 0)
self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.inter_channels, kernel_size=1, stride=1, padding=0)
self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.inter_channels, kernel_size=1, stride=1, padding=0)
def forward(self, input_0):
primals_2 = self.g.weight
primals_3 = self.g.bias
primals_4 = self.W.weight
primals_5 = self.W.bias
primals_6 = self.theta.weight
primals_7 = self.theta.bias
primals_8 = self.phi.weight
primals_9 = self.phi.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| zhouhuanxiang/EDSR-PyTorch | NonLocalBlock2D | false | 13181 | ["MIT"] | 0 | ca2f0eea49476a0acde59dd76aa4ae257389d98c | https://github.com/zhouhuanxiang/EDSR-PyTorch/tree/ca2f0eea49476a0acde59dd76aa4ae257389d98c |
Value | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
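# Reference sketch added for illustration only (not part of the compiled graph):
# the kernel below fuses the linear layer's bias add with the tanh activation.
# A hypothetical eager equivalent, assuming a matmul output `mm` of shape
# (..., 64) and a (64,) bias `b`:
def _bias_tanh_reference(mm, b):
    return torch.tanh(mm + b)  # bias broadcast over the last dim, then tanh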
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (1, 64), (64, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [state_values], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5)
del primals_7
return (reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Value(nn.Module):
def __init__(self, num_inputs):
super(Value, self).__init__()
self.affine1 = nn.Linear(num_inputs, 64)
self.affine2 = nn.Linear(64, 64)
self.value_head = nn.Linear(64, 1)
self.value_head.weight.data.mul_(0.1)
self.value_head.bias.data.mul_(0.0)
def forward(self, x):
x = torch.tanh(self.affine1(x))
x = torch.tanh(self.affine2(x))
state_values = self.value_head(x)
return state_values
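# Minimal usage sketch (illustrative only; `_example_usage` is not part of the
# original module). The critic maps each state vector to a scalar value estimate.
def _example_usage():
    critic = Value(num_inputs=4)
    states = torch.rand(8, 4)
    values = critic(states)  # shape (8, 1)
    return values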
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (1, 64), (64, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0),
alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_6, primals_4
class ValueNew(nn.Module):
def __init__(self, num_inputs):
super(ValueNew, self).__init__()
self.affine1 = nn.Linear(num_inputs, 64)
self.affine2 = nn.Linear(64, 64)
self.value_head = nn.Linear(64, 1)
self.value_head.weight.data.mul_(0.1)
self.value_head.bias.data.mul_(0.0)
def forward(self, input_0):
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.affine2.weight
primals_5 = self.affine2.bias
primals_6 = self.value_head.weight
primals_7 = self.value_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| zwc662/Safe_GAIL | Value | false | 13182 | ["MIT"] | 0 | 536dd73c91d277b418ef04efdd42aa6c87fdad33 | https://github.com/zwc662/Safe_GAIL/tree/536dd73c91d277b418ef04efdd42aa6c87fdad33 |
AutoEncoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/bg/cbgse6huxiz3z3nj62gjowaky5xqc2ixg2z47qxe6shy2qxm6db7.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# out => tanh_1
# Graph fragment:
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 100), (100, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 6400, grid=grid(6400), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class AutoEncoder(nn.Module):
def __init__(self, num_question, k=100):
""" Initialize a class AutoEncoder.
:param num_question: int
:param k: int
"""
super(AutoEncoder, self).__init__()
self.g = nn.Linear(num_question, k)
self.h = nn.Linear(k, num_question)
def get_weight_norm(self):
""" Return ||W^1|| + ||W^2||.
:return: float
"""
g_w_norm = torch.norm(self.g.weight, 2)
h_w_norm = torch.norm(self.h.weight, 2)
return g_w_norm + h_w_norm
def forward(self, inputs):
""" Return a forward pass given inputs.
:param inputs: user vector.
:return: user vector.
"""
        out = torch.tanh(self.h(torch.tanh(self.g(inputs))))
return out
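# Minimal usage sketch (illustrative only; `_example_loss` and `lamb` are
# assumptions, not part of the original module). A typical L2-regularized
# reconstruction objective combines the forward pass with get_weight_norm:
def _example_loss(lamb=0.01):
    model = AutoEncoder(num_question=4)
    inputs = torch.rand(2, 4)
    out = model(inputs)
    return torch.sum((out - inputs) ** 2.0) + lamb / 2 * model.get_weight_norm()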
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_question': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 100), (100, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(6400)](buf1, primals_2, 6400, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0),
reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_1[grid(256)](buf3, primals_5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_4
class AutoEncoderNew(nn.Module):
def __init__(self, num_question, k=100):
""" Initialize a class AutoEncoder.
:param num_question: int
:param k: int
"""
super(AutoEncoderNew, self).__init__()
self.g = nn.Linear(num_question, k)
self.h = nn.Linear(k, num_question)
def get_weight_norm(self):
""" Return ||W^1|| + ||W^2||.
:return: float
"""
g_w_norm = torch.norm(self.g.weight, 2)
h_w_norm = torch.norm(self.h.weight, 2)
return g_w_norm + h_w_norm
def forward(self, input_0):
primals_1 = self.g.weight
primals_2 = self.g.bias
primals_4 = self.h.weight
primals_5 = self.h.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| zuoyuwang/ML-Correctness-prediction | AutoEncoder | false | 13183 | ["MIT"] | 0 | 15180b73567e61cc7a5dd61b0202a42eca808734 | https://github.com/zuoyuwang/ML-Correctness-prediction/tree/15180b73567e61cc7a5dd61b0202a42eca808734 |
ImgPatches | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/vo/cvo4iu6rn2jljwd7fn6tqzwns5mcxrufyjzpv2b3qybl4vhvc3h2.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2304
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (48*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4m/c4mshxu4bq3hduvpdghbp5cvwf5wm5cau2c3c2hmzusgbrcciu2t.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 3072
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = (yindex // 768)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (768*x2) + (196608*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (256*y3)), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (768, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((768, 3, 4, 4), (48, 1, 12, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 2304, 16, grid=grid(2304, 16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, buf0, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 16, 16), (196608, 1, 12288, 768))
buf3 = empty_strided_cuda((4, 768, 16, 16), (196608, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf2, primals_2, buf3, 3072, 256, grid=grid(3072, 256), stream=stream0)
del buf2
del primals_2
return (reinterpret_tensor(buf3, (4, 256, 768), (196608, 1, 256), 0), buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((768, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((768, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class ImgPatches(nn.Module):
def __init__(self, input_channel=3, dim=768, patch_size=4):
super().__init__()
self.patch_embed = nn.Conv2d(input_channel, dim, kernel_size=
patch_size, stride=patch_size)
def forward(self, img):
patches = self.patch_embed(img).flatten(2).transpose(1, 2)
return patches
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
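# A quick usage sketch of the module above (assumption: a CPU shape check is
# representative; `_img_patches_example` is an illustrative helper): with the
# default patch_size=4, a (4, 3, 64, 64) image yields (64 / 4) ** 2 = 256
# patch tokens of dimension 768.
def _img_patches_example():
    m = ImgPatches()
    out = m(torch.rand(4, 3, 64, 64))
    assert out.shape == (4, 256, 768)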
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 2304
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = yindex // 768
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 196608 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 256 * y3), tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (768,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((768, 3, 4, 4), (48, 1, 12, 3), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(2304, 16)](primals_1, buf0, 2304, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(4, 4),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 16, 16), (196608, 1, 12288, 768))
buf3 = empty_strided_cuda((4, 768, 16, 16), (196608, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_2[grid(3072, 256)](buf2, primals_2,
buf3, 3072, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf2
del primals_2
return reinterpret_tensor(buf3, (4, 256, 768), (196608, 1, 256), 0
), buf0, buf1
class ImgPatchesNew(nn.Module):
def __init__(self, input_channel=3, dim=768, patch_size=4):
super().__init__()
self.patch_embed = nn.Conv2d(input_channel, dim, kernel_size=
patch_size, stride=patch_size)
def forward(self, input_0):
primals_1 = self.patch_embed.weight
primals_2 = self.patch_embed.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| zoosecretbase/TransGAN | ImgPatches | false | 13,184 | ["MIT"] | 0 | f2546aec5b80bdddb2c8621a6e011532df3e2d73 | https://github.com/zoosecretbase/TransGAN/tree/f2546aec5b80bdddb2c8621a6e011532df3e2d73 |
SentenceClassificationModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%addmm, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
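# The kernel above specializes the LayerNorm statistics for hidden size 4:
# the feature-dimension reduction is fully unrolled into four scalar loads
# per row, and it stores the mean and rsqrt(var + eps), with the biased
# variance matching correction=0 in the var_mean node. A plain-torch sketch
# of the same per-row math (`_layer_norm_stats_reference` is an illustrative
# name):
def _layer_norm_stats_reference(x, eps=1e-05):
    import torch
    mean = x.mean(dim=1, keepdim=True)
    var = x.var(dim=1, unbiased=False, keepdim=True)  # biased, correction=0
    return mean, torch.rsqrt(var + eps)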
# kernel path: runs/run_shard_9/inductor_cache/3l/c3l5to5llpnh3khiljbdg5qfpwxqxoo3v4y4cqj6w2pmgbxmmkgx.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.native_layer_norm, aten.relu]
# Source node to ATen node mapping:
# x_2 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# x_3 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%addmm, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_poi_fused_native_layer_norm_relu_1 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/b7/cb7iq44xucvx4o4uio3etz5hrrkllxx5igr3vjyglpwcku6mi232.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_6 => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(buf0, buf1, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.native_layer_norm, aten.relu]
triton_poi_fused_native_layer_norm_relu_1.run(buf0, buf1, buf2, primals_4, primals_5, buf3, 16, grid=grid(16), stream=stream0)
del buf1
del primals_5
buf4 = reinterpret_tensor(buf2, (4, 1), (1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf5, primals_7, 4, grid=grid(4), stream=stream0)
del primals_7
return (buf5, primals_4, primals_1, buf0, buf3, buf5, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch.nn import functional as F
import torch.onnx
class SentenceClassificationModule(Module):
def __init__(self, input_dimensions: 'int', hidden_dimensions: 'int',
dropout: 'float'=0.3):
super().__init__()
self.layer_1 = torch.nn.Linear(input_dimensions, hidden_dimensions)
self.layer_2 = torch.nn.Linear(hidden_dimensions, 1)
self.dropout = torch.nn.Dropout(p=dropout)
self.norm = torch.nn.LayerNorm(hidden_dimensions)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.layer_1(x)
x = self.norm(x)
x = F.relu(x)
x = self.dropout(x)
x = self.layer_2(x)
x = torch.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_dimensions': 4, 'hidden_dimensions': 4}]
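# Usage sketch for the module above (assumption: an eval-mode CPU run, so
# dropout is inactive; `_sentence_classifier_example` is an illustrative
# name): the pipeline Linear -> LayerNorm -> ReLU -> Dropout -> Linear ->
# sigmoid maps a (batch, 4) input to a (batch, 1) probability in (0, 1).
def _sentence_classifier_example():
    m = SentenceClassificationModule(input_dimensions=4, hidden_dimensions=4)
    m.eval()  # disable dropout for a deterministic check
    p = m(torch.rand(4, 4))
    assert p.shape == (4, 1) and bool(((p > 0) & (p < 1)).all())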
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](buf0, buf1, buf2, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_1[grid(16)](buf0, buf1,
buf2, primals_4, primals_5, buf3, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf1
del primals_5
buf4 = reinterpret_tensor(buf2, (4, 1), (1, 1), 0)
del buf2
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (4, 1), (1, 4
), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_sigmoid_2[grid(4)](buf5, primals_7, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_7
return buf5, primals_4, primals_1, buf0, buf3, buf5, primals_6
class SentenceClassificationModuleNew(Module):
def __init__(self, input_dimensions: 'int', hidden_dimensions: 'int',
dropout: 'float'=0.3):
super().__init__()
self.layer_1 = torch.nn.Linear(input_dimensions, hidden_dimensions)
self.layer_2 = torch.nn.Linear(hidden_dimensions, 1)
self.dropout = torch.nn.Dropout(p=dropout)
self.norm = torch.nn.LayerNorm(hidden_dimensions)
def forward(self, input_0):
primals_1 = self.layer_1.weight
primals_3 = self.layer_1.bias
primals_6 = self.layer_2.weight
primals_7 = self.layer_2.bias
primals_4 = self.norm.weight
primals_5 = self.norm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| zolekode/flexudy-multilingual-grammar-checker | SentenceClassificationModule | false | 13,185 | ["Apache-2.0"] | 0 | 86ea35acff0b8eea49d9b1ff9193b69eabc26ef9 | https://github.com/zolekode/flexudy-multilingual-grammar-checker/tree/86ea35acff0b8eea49d9b1ff9193b69eabc26ef9 |
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/nu/cnuc7ivckuuly7yn2763pwt3sw72jd6vuwpeeu4sfespm5iz7fq4.py
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# p_attn => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
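# Note on the kernel above: the 1 / sqrt(d_k) scaling is folded into the
# numerically stable softmax. With d_k = 4 the factor is 0.5, so the kernel
# computes exp((s - max(s)) * 0.5); the row max cancels in the normalization,
# making this identical to softmax(s / 2). A sketch of that identity
# (`_scaled_softmax_identity` is an illustrative name):
def _scaled_softmax_identity(scores):
    import torch
    stable = torch.exp((scores - scores.amax(-1, keepdim=True)) * 0.5)
    stable = stable / stable.sum(-1, keepdim=True)
    return torch.allclose(stable, torch.softmax(scores * 0.5, dim=-1))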
# kernel path: runs/run_shard_9/inductor_cache/fj/cfjl47pvhwbpfbvh6rfehwy5ijxc5p3zgkld2lwf3mw5bl6pbkak.py
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# p_attn => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3)
del arg2_1
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, )
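# Buffer-reuse note for call() above: buf0 (the raw scores) is reinterpreted
# as buf2 to hold the normalized softmax, and buf1 (the exp numerators) is
# reinterpreted as buf3 for the final bmm output, so the whole attention
# runs in just two scratch allocations.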
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
"""
Compute 'Scaled Dot Product Attention'
"""
def __init__(self, dropout=0.0):
"""
:param dropout: attention dropout rate
"""
super().__init__()
self.dropout = dropout
def forward(self, query, key, value, mask=None):
"""
:param query: (batch_num, query_length, d_model)
:param key: (batch_num, key_length, d_model)
:param value: (batch_num, key_length, d_model)
"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill_(mask == 0, -1000000000.0)
p_attn = F.softmax(scores, dim=-1)
p_attn = F.dropout(p_attn, p=self.dropout)
return torch.matmul(p_attn, value), p_attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
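# Usage sketch for the module above (assumption: small CPU tensors and the
# default dropout=0.0; `_attention_example` is an illustrative name): each
# attention row is a distribution over keys, so p_attn sums to 1 along the
# last dimension and the output keeps the query shape.
def _attention_example():
    attn = ScaledDotProductAttention()
    q = k = v = torch.rand(4, 4, 4, 4)
    out, p = attn(q, k, v)
    assert out.shape == (4, 4, 4, 4)
    assert torch.allclose(p.sum(-1), torch.ones(4, 4, 4))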
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3
)
del arg2_1
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2
class ScaledDotProductAttentionNew(nn.Module):
"""
Compute 'Scaled Dot Product Attention'
"""
def __init__(self, dropout=0.0):
"""
:param dropout: attention dropout rate
"""
super().__init__()
self.dropout = dropout
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
| timgianitsos/squad | ScaledDotProductAttention | false | 13,186 | ["MIT"] | 0 | 6ab502652e3528cfeeddfb8eba05221443a35294 | https://github.com/timgianitsos/squad/tree/6ab502652e3528cfeeddfb8eba05221443a35294 |
AdaIN2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/hi/chiraaxnmh55qd4kuoiitqh5wnd7lwamannokifrtxk54x6sfwpd.py
# Topologically Sorted Source Nodes: [x, add, mul, add_1], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.mul]
# Source node to ATen node mapping:
# add => add_1
# add_1 => add_2
# mul => mul_1
# x => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_1, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %add_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %getitem), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_mul_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r2 = rindex % 4
r3 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (4 + r2 + (8*r3)), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (4 + r2), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr1 + (r2 + (8*r3)), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (r2), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp26 = tmp24 + tmp25
tmp27 = 1.0
tmp28 = tmp26 + tmp27
tmp29 = tmp23 * tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp29 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (16*x0)), tmp33, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
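# The persistent reduction above handles one (n, c) pair per program:
# xnumel = 16 covers N * C and rnumel = 16 covers H * W, matching the
# (1, N*C, H, W) view that instance norm takes for its batch-norm-style
# statistics. The 16 spatial values are reduced entirely in registers, and
# the affine x_hat * (s + 1) + b is applied in the same pass. Per-row
# normalization as a plain-torch sketch (`_instance_norm_row` is an
# illustrative name):
def _instance_norm_row(x_row, eps=1e-05):
    import torch
    mean = x_row.mean()
    rstd = torch.rsqrt(x_row.var(unbiased=False) + eps)  # correction=0
    return (x_row - mean) * rstd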
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, add, mul, add_1], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0.run(buf4, primals_4, buf0, primals_2, buf1, buf5, 16, 16, grid=grid(16), stream=stream0)
del buf0
del primals_2
return (buf5, primals_3, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AdaIN2d(nn.Module):
def __init__(self, in_channels, in_features):
super(AdaIN2d, self).__init__()
self.norm = nn.InstanceNorm2d(in_channels, affine=False,
track_running_stats=False)
self.net = nn.Linear(in_features, 2 * in_channels)
self.reset_parameters()
def forward(self, x, h):
h = self.net(h)
bs, fs = h.size()
        h.view(bs, fs, 1, 1)  # NOTE: result is discarded (no assignment);
        # b and s below therefore stay (bs, fs) and broadcast over x's
        # trailing spatial dims, which the compiled kernel above reproduces
b, s = h.chunk(2, 1)
x = self.norm(x)
return x * (s + 1) + b
def reset_parameters(self):
nn.init.constant_(self.net.weight, 0.0)
nn.init.constant_(self.net.bias, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'in_features': 4}]
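# A behavior sketch for the module above (assumption: freshly constructed,
# so reset_parameters() zeroes the net and s = b = 0;
# `_adain_broadcast_example` is an illustrative name): since the h.view(...)
# result is discarded, s and b remain (bs, in_channels) and broadcast
# against x's trailing spatial dims.
def _adain_broadcast_example():
    m = AdaIN2d(in_channels=4, in_features=4)
    x, h = torch.rand(4, 4, 4, 4), torch.rand(4, 4)
    out = m(x, h)
    assert out.shape == (4, 4, 4, 4)
    assert torch.allclose(out, m.norm(x))  # zero-initialized net => s = b = 0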
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r2 = rindex % 4
r3 = rindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (4 + r2 + 8 * r3), None, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + (4 + r2), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr1 + (r2 + 8 * r3), None, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp26 = tmp24 + tmp25
tmp27 = 1.0
tmp28 = tmp26 + tmp27
tmp29 = tmp23 * tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp29 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp33, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4,
primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_4, buf1, buf4
class AdaIN2dNew(nn.Module):
def __init__(self, in_channels, in_features):
super(AdaIN2dNew, self).__init__()
self.norm = nn.InstanceNorm2d(in_channels, affine=False,
track_running_stats=False)
self.net = nn.Linear(in_features, 2 * in_channels)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.net.weight, 0.0)
nn.init.constant_(self.net.bias, 0.0)
def forward(self, input_0, input_1):
primals_1 = self.net.weight
primals_2 = self.net.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| wp03052/wolf | AdaIN2d | false | 13,187 | ["Apache-2.0"] | 0 | 49a582cafb829a2642db360c7d94c21439247ec7 | https://github.com/wp03052/wolf/tree/49a582cafb829a2642db360c7d94c21439247ec7 |
Policy | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
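# The kernel above fuses only the epilogue of each linear layer: the matmul
# itself runs through extern_kernels.mm (cuBLAS) in call(), and this
# pointwise kernel applies bias-add + tanh in one pass over the 64 * 64 =
# 4096 output elements. Equivalent plain-torch sketch
# (`_bias_tanh_reference` is an illustrative name):
def _bias_tanh_reference(x, weight, bias):
    import torch
    return torch.tanh(x @ weight.t() + bias)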
# kernel path: runs/run_shard_9/inductor_cache/53/c5336tes3fejn37nhb2iijuur7spy3qcasflywbbqklxwgjxpcvr.py
# Topologically Sorted Source Nodes: [action_std], Original ATen: [aten.exp]
# Source node to ATen node mapping:
# action_std => exp
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%expand,), kwargs = {})
triton_poi_fused_exp_1 = async_compile.triton('triton_poi_fused_exp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tl.store(out_ptr0 + (x2), tmp1, xmask)
''', device_str='cuda')
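# This kernel materializes exp(action_log_std) already broadcast to the
# action_mean shape: indexing in_ptr0 with `xindex % 4` rereads the (1, 4)
# log-std row instead of expanding it in memory. Plain-torch sketch
# (`_expanded_std_reference` is an illustrative name):
def _expanded_std_reference(action_log_std, action_mean):
    import torch
    return torch.exp(action_log_std.expand_as(action_mean))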
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [action_mean], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [action_std], Original ATen: [aten.exp]
triton_poi_fused_exp_1.run(primals_8, buf5, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, primals_6, primals_4, )
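# The first three entries are the forward outputs (action_mean reshaped to
# (4, 4, 4, 4), the zero-stride expanded view of action_log_std, and
# action_std); the remaining tensors are saved for the backward pass.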
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Policy(nn.Module):
def __init__(self, num_inputs, num_outputs, discrete=False):
super(Policy, self).__init__()
self.discrete = discrete
self.affine1 = nn.Linear(num_inputs, 64)
self.affine2 = nn.Linear(64, 64)
self.action_mean = nn.Linear(64, num_outputs)
self.action_mean.weight.data.mul_(0.1)
self.action_mean.bias.data.mul_(0.0)
if self.discrete:
self.action_preds = nn.Softmax()
self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs))
self.saved_actions = []
self.rewards = []
self.final_value = 0
def forward(self, x):
x = torch.tanh(self.affine1(x))
x = torch.tanh(self.affine2(x))
action_mean = self.action_mean(x)
if self.discrete:
action_mean = torch.sigmoid(action_mean)
action_mean = self.action_preds(action_mean)
action_log_std = self.action_log_std.expand_as(action_mean)
action_std = torch.exp(action_log_std)
return action_mean, action_log_std, action_std
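# The three returned tensors presumably parameterize a diagonal Gaussian over
# actions: a state-dependent mean plus a state-independent (log-)std parameter.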
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_outputs': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)  # no-op retained from the masked original; every lane is in bounds for the 4096-element launch
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tl.store(out_ptr0 + x2, tmp1, xmask)
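# Note: unlike the wrapper above, this variant launches kernels directly via
# the kernel[grid(n)](...) indexing syntax with explicit XBLOCK/num_warps
# rather than through the autotuning .run(...) helper.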
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
get_raw_stream(0)  # vestigial stream lookup kept from the original wrapper; its result is unused here
triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_exp_1[grid(256)](primals_8, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0
), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf5, primals_6, primals_4
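# PolicyNew.forward below slices output[0:3] from this tuple, matching the
# (action_mean, action_log_std, action_std) contract of the eager Policy.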
class PolicyNew(nn.Module):
def __init__(self, num_inputs, num_outputs, discrete=False):
super(PolicyNew, self).__init__()
self.discrete = discrete
self.affine1 = nn.Linear(num_inputs, 64)
self.affine2 = nn.Linear(64, 64)
self.action_mean = nn.Linear(64, num_outputs)
self.action_mean.weight.data.mul_(0.1)
self.action_mean.bias.data.mul_(0.0)
if self.discrete:
self.action_preds = nn.Softmax()
self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs))
self.saved_actions = []
self.rewards = []
self.final_value = 0
def forward(self, input_0):
primals_8 = self.action_log_std
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.affine2.weight
primals_5 = self.affine2.bias
primals_6 = self.action_mean.weight
primals_7 = self.action_mean.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1], output[2]
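# Usage sketch (an assumption, mirroring get_inputs()/get_init_inputs();
# requires a CUDA device):
#   model = PolicyNew(num_inputs=4, num_outputs=4).cuda()
#   mean, log_std, std = model(torch.rand(4, 4, 4, 4, device='cuda'))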
| zwc662/Safe_GAIL | Policy | false | 13,188 | [
"MIT"
]
| 0 | 536dd73c91d277b418ef04efdd42aa6c87fdad33 | https://github.com/zwc662/Safe_GAIL/tree/536dd73c91d277b418ef04efdd42aa6c87fdad33 |
MIRB3 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/34/c34lnuw3wok4euydis4jz2cumdv5zl53hr2km2mr6sokjcomm2j6.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused__weight_norm_interface_0 = async_compile.triton('triton_poi_fused__weight_norm_interface_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (6*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (6*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (6*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (6*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4 + (6*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (5 + (6*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tl.store(out_ptr0 + (x0), tmp17, xmask)
''', device_str='cuda')
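# Note: triton_poi_fused__weight_norm_interface_0 computes the per-filter L2
# norm of the (18, 6, 1, 1) pointwise weight: the 6-element reduction is fully
# unrolled into six loads, a sum of squares, and a sqrt per output channel.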
# kernel path: runs/run_shard_9/inductor_cache/4h/c4hyivh4rlcg5ap7pj7xbbqscs2u2jjb4g5w4jaidlc7wpyz7lbi.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
triton_poi_fused__weight_norm_interface_1 = async_compile.triton('triton_poi_fused__weight_norm_interface_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 6)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
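# Note: triton_poi_fused__weight_norm_interface_1 applies the weight-norm
# reparameterization w = v * (g / ||v||): each of the 108 weight elements is
# scaled by its filter's gain divided by the norm computed above.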
# kernel path: runs/run_shard_9/inductor_cache/ij/cijtnciymslu5jmlqs4skpm4lp2nibv5hh3th6b5i2szhhqwwhmh.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %mul, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 3), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 18
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
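# Note: triton_poi_fused_convolution_2 adds the per-channel bias (channel index
# (xindex // 4096) % 18) to a (4, 18, 64, 64) convolution output in place; the
# convolution itself runs through extern_kernels.convolution.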
# kernel path: runs/run_shard_9/inductor_cache/mz/cmzalccs4mduopsp2yd4zky3wwaeepvbubqparxslix3sntpfkub.py
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_1 => div_1, mul_1, pow_3, pow_4, sum_2
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_6, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1, 2, 3], True), kwargs = {})
# %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_5, %pow_4), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %div_1), kwargs = {})
triton_per_fused__weight_norm_interface_3 = async_compile.triton('triton_per_fused__weight_norm_interface_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 18
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (9*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (9*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
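# Note: triton_per_fused__weight_norm_interface_3 is a persistent-reduction
# kernel that fuses the whole weight-norm step for the depthwise (18, 1, 3, 3)
# weights: each program reduces one 9-element filter to its L2 norm, stores the
# norm, and writes the rescaled weights v * (g / ||v||) in the same pass.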
# kernel path: runs/run_shard_9/inductor_cache/dy/cdylggyn2ws3sfvdukozllsfp2dvd7jch6eitsmkngzuzggssgxt.py
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_2 => div_2, mul_2, pow_5, pow_6, sum_3
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_9, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1, 2, 3], True), kwargs = {})
# %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_8, %pow_6), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, %div_2), kwargs = {})
triton_per_fused__weight_norm_interface_4 = async_compile.triton('triton_per_fused__weight_norm_interface_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 9
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (18*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (18*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
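# Note: triton_per_fused__weight_norm_interface_4 repeats the same fused
# norm-plus-rescale pattern for the (9, 18, 1, 1) projection weights, reducing
# one 18-element row per program.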
# kernel path: runs/run_shard_9/inductor_cache/4r/c4rv2lnvemalwfmwemwkkjuqab3eovephhf3dkwsqaqrzbzurn6e.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 18
x0 = xindex % 4096
x2 = (xindex // 73728)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (36864*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 18, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tl.load(in_ptr2 + (x0 + (4096*((-9) + x1)) + (36864*x2)), tmp15, other=0.0)
tmp19 = tl.load(in_ptr3 + ((-9) + x1), tmp15, eviction_policy='evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tmp20 > tmp8
tmp22 = tmp20 * tmp10
tmp23 = tl.where(tmp21, tmp20, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tl.store(out_ptr0 + (x3), tmp26, None)
''', device_str='cuda')
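# Note: triton_poi_fused_cat_5 fuses the channel concatenation with the
# epilogues of both branches: channels 0-8 read the first conv output and
# channels 9-17 the second, each getting its bias add plus leaky_relu
# (negative slope 0.2) before the combined (4, 18, 64, 64) tensor is written.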
# kernel path: runs/run_shard_9/inductor_cache/nt/cntzp5nakwvul7tcparxahaerbizrgojsic6at2feukyiwjvutbv.py
# Topologically Sorted Source Nodes: [_weight_norm_18], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_18 => div_18, mul_24, pow_37, pow_38, sum_19
# Graph fragment:
# %pow_37 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_57, 2), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_37, [1, 2, 3], True), kwargs = {})
# %pow_38 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_19, 0.5), kwargs = {})
# %div_18 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_56, %pow_38), kwargs = {})
# %mul_24 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_57, %div_18), kwargs = {})
triton_per_fused__weight_norm_interface_6 = async_compile.triton('triton_per_fused__weight_norm_interface_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 18
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (18*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (18*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
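# Note: triton_per_fused__weight_norm_interface_6 handles the final
# (18, 18, 1, 1) fusion weights with the same fused per-row norm-plus-rescale
# reduction.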
# kernel path: runs/run_shard_9/inductor_cache/yr/cyrzws2lnjde2as7gzejnwjcj4lymm7bgicdqzp4zy6p5j7qtkpv.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out => convolution_18
# out_1 => add
# Graph fragment:
# %convolution_18 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_4, %mul_24, %primals_58, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_18, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_7 = async_compile.triton('triton_poi_fused_add_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 18
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
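# Note: triton_poi_fused_add_convolution_7 fuses the output projection's bias
# add with the residual connection: out = conv(cat_4) + bias + primals_1, i.e.
# the block's input is added back onto its output.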
# kernel path: runs/run_shard_9/inductor_cache/al/callo7bieu2ffgjrumilpfnh66yj2nuuap5g7jnjsb3x7g3p5ot7.py
# Topologically Sorted Source Nodes: [x_17, c2_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# c2_4 => gt_5, mul_23, where_5
# x_17 => convolution_17
# Graph fragment:
# %convolution_17 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_16, %mul_22, %primals_55, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_17, 0), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_17, 0.2), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_17, %mul_23), kwargs = {})
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_5, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 9
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
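# Note: triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8
# recomputes bias + leaky_relu for one branch and stores only the boolean mask
# leaky_relu(x) > 0, which the autograd backward needs to route gradients.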
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58 = args
args.clear()
assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1))
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_4, (18, ), (1, ))
assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (18, ), (1, ))
assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_10, (9, ), (1, ))
assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_13, (18, ), (1, ))
assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_16, (18, ), (1, ))
assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_19, (9, ), (1, ))
assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_22, (18, ), (1, ))
assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_25, (18, ), (1, ))
assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_28, (9, ), (1, ))
assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_31, (18, ), (1, ))
assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_34, (18, ), (1, ))
assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_37, (9, ), (1, ))
assert_size_stride(primals_38, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_40, (18, ), (1, ))
assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_43, (18, ), (1, ))
assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_46, (9, ), (1, ))
assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_49, (18, ), (1, ))
assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_52, (18, ), (1, ))
assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_55, (9, ), (1, ))
assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_58, (18, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
stream0 = get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0.run(primals_3, buf0, 18, grid=grid(18), stream=stream0)
buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_3, primals_2, buf0, buf1, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1))
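        # The convolutions themselves are dispatched to ATen via
        # extern_kernels.convolution; only the elementwise epilogues (bias adds,
        # activations, weight-norm math) run in the Triton kernels defined above.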
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf3, primals_4, 294912, grid=grid(294912), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf4 # reuse
buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf5, primals_6, primals_5, buf6, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf8, primals_7, 294912, grid=grid(294912), stream=stream0)
del primals_7
buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf9 # reuse
buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf10, primals_9, primals_8, buf11, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_12, buf13, 18, grid=grid(18), stream=stream0)
buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_12, primals_11, buf13, buf14, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf16, primals_13, 294912, grid=grid(294912), stream=stream0)
del primals_13
buf17 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf17 # reuse
buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_4], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf18, primals_15, primals_14, buf19, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf21, primals_16, 294912, grid=grid(294912), stream=stream0)
del primals_16
buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf22 # reuse
buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_5], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf23, primals_18, primals_17, buf24, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf12, primals_10, buf25, primals_19, buf26, 294912, grid=grid(294912), stream=stream0)
buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_6], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_21, buf27, 18, grid=grid(18), stream=stream0)
buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_6], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_21, primals_20, buf27, buf28, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf30 = buf29; del buf29 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf30, primals_22, 294912, grid=grid(294912), stream=stream0)
del primals_22
buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf31 # reuse
buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_7], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf32, primals_24, primals_23, buf33, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf35, primals_25, 294912, grid=grid(294912), stream=stream0)
del primals_25
buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf36 # reuse
buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_8], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf37, primals_27, primals_26, buf38, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_9], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_30, buf40, 18, grid=grid(18), stream=stream0)
buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_9], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_30, primals_29, buf40, buf41, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf26, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf43, primals_31, 294912, grid=grid(294912), stream=stream0)
del primals_31
buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf44 # reuse
buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_10], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf45, primals_33, primals_32, buf46, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf48 = buf47; del buf47 # reuse
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf48, primals_34, 294912, grid=grid(294912), stream=stream0)
del primals_34
buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf49 # reuse
buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_11], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf50, primals_36, primals_35, buf51, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf39, primals_28, buf52, primals_37, buf53, 294912, grid=grid(294912), stream=stream0)
buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_12], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_39, buf54, 18, grid=grid(18), stream=stream0)
buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_12], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_39, primals_38, buf54, buf55, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf57 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf57, primals_40, 294912, grid=grid(294912), stream=stream0)
del primals_40
buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf58 # reuse
buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_13], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf59, primals_42, primals_41, buf60, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf62 = buf61; del buf61 # reuse
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf62, primals_43, 294912, grid=grid(294912), stream=stream0)
del primals_43
buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf63 # reuse
buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_14], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf64, primals_45, primals_44, buf65, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_15], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_48, buf67, 18, grid=grid(18), stream=stream0)
buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_15], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_48, primals_47, buf67, buf68, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf70 = buf69; del buf69 # reuse
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf70, primals_49, 294912, grid=grid(294912), stream=stream0)
del primals_49
buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf71 # reuse
buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_16], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf72, primals_51, primals_50, buf73, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf75 = buf74; del buf74 # reuse
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf75, primals_52, 294912, grid=grid(294912), stream=stream0)
del primals_52
buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf76 # reuse
buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_17], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf77, primals_54, primals_53, buf78, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution]
buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf66, primals_46, buf79, primals_55, buf80, 294912, grid=grid(294912), stream=stream0)
buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf81 # reuse
buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_18], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_6.run(buf82, primals_57, primals_56, buf83, 18, 18, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf85 = buf84; del buf84 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_7.run(buf85, primals_58, primals_1, 294912, grid=grid(294912), stream=stream0)
del primals_58
buf86 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_17, c2_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf79, primals_55, buf86, 147456, grid=grid(147456), stream=stream0)
del buf79
del primals_55
buf87 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_14, c1_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf66, primals_46, buf87, 147456, grid=grid(147456), stream=stream0)
del buf66
del primals_46
buf88 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_11, c2_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf52, primals_37, buf88, 147456, grid=grid(147456), stream=stream0)
del buf52
del primals_37
buf89 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_8, c1_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf39, primals_28, buf89, 147456, grid=grid(147456), stream=stream0)
del buf39
del primals_28
buf90 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5, c2_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf25, primals_19, buf90, 147456, grid=grid(147456), stream=stream0)
del buf25
del primals_19
buf91 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2, c1_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf12, primals_10, buf91, 147456, grid=grid(147456), stream=stream0)
del buf12
del primals_10
return (buf85, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33, buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73, buf78, buf83, primals_1, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, primals_14, primals_15, primals_17, primals_18, primals_20, primals_21, primals_23, primals_24, primals_26, primals_27, primals_29, primals_30, primals_32, primals_33, primals_35, primals_36, primals_38, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, primals_50, primals_51, primals_53, primals_54, primals_56, primals_57, buf0, buf1, buf3, buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19, buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33, buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48, buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62, buf64, buf65, buf67, buf68, buf70, buf72, buf73, buf75, buf77, buf78, buf80, buf82, buf83, buf86, buf87, buf88, buf89, buf90, buf91, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 18, 64, 64), (73728, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((18, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
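# Separable conv stack: grouped 1x1 conv -> depthwise 3x3 conv -> pointwise 1x1
# conv, each wrapped in weight normalization.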
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, groups=3):
super(ConvBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=1, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
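# Same stack as ConvBlock, but the depthwise 3x3 conv is dilated
# (padding = dilation = ker_size) to enlarge the receptive field.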
class ConvBlockD(nn.Module):
def __init__(self, in_channels, out_channels, groups=3, ker_size=2):
super(ConvBlockD, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=ker_size, dilation=ker_size, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class MIRB3(nn.Module):
def __init__(self, args):
super(MIRB3, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
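    # Three rounds of parallel plain/dilated branches; each round concatenates
    # the two 9-channel outputs back to n_feats channels, and a final 1x1 conv
    # plus a residual connection produces the block output.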
def forward(self, x):
res = x
c1_1 = self.lrelu(self.conv3_1(res))
c2_1 = self.lrelu(self.convd_1(res))
c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1)))
c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1)))
c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1)))
c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1)))
out = self.conv_last(torch.cat([c1_4, c2_4], 1))
out = out + x
return out
def get_inputs():
return [torch.rand([4, 18, 64, 64])]
def get_init_inputs():
return [[], {'args': _mock_config(n_feats=18)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
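# Per-filter L2 norm of the grouped 1x1 conv weight_v (18 filters, 6 unrolled
# input channels each): norm[x0] = sqrt(sum_k v[x0, k]^2).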
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tl.store(out_ptr0 + x0, tmp17, xmask)
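# Second half of weight norm: w = v * (g / norm), broadcast per output channel
# (x1 = flat index // 6 selects the filter's g and precomputed norm).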
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 6
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
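# In-place bias add after a convolution: each of the 18 channels of the
# 4x18x64x64 activation receives its per-channel bias (x1 = index // 4096 % 18).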
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
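# Fused weight norm for the depthwise 3x3 weights: reduces v^2 over each 3x3
# kernel (rnumel = 9), writes the norm to in_out_ptr0 and v * g / norm to out_ptr0.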
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 18
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 9 * x0), tmp9, rmask & xmask)
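# Same weight-norm reduction for the pointwise (9, 18, 1, 1) weights,
# reducing over the 18 input channels of each of the 9 filters.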
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 9
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)
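# Fuses torch.cat([c1, c2], dim=1) with the bias add and LeakyReLU(0.2) of both
# 9-channel branch outputs, restoring the full 18-channel tensor for the next conv.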
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 18
x0 = xindex % 4096
x2 = xindex // 73728
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 36864 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tl.full([1], 18, tl.int64)
tmp18 = tl.load(in_ptr2 + (x0 + 4096 * (-9 + x1) + 36864 * x2), tmp15,
other=0.0)
    tmp19 = tl.load(in_ptr3 + (-9 + x1), tmp15, eviction_policy='evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tmp20 > tmp8
tmp22 = tmp20 * tmp10
tmp23 = tl.where(tmp21, tmp20, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tl.store(out_ptr0 + x3, tmp26, None)
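# Weight norm for the final 18x18 1x1 conv (conv_last), reducing v^2 over the
# 18 input channels of each filter.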
@triton.jit
def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 18
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)
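# Epilogue of conv_last: adds the per-channel bias (in_ptr0) and the residual
# block input (in_ptr1) in place, i.e. out = conv(x') + b + x.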
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, None)
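# Saves the boolean mask needed by LeakyReLU backward: recomputes the bias add
# plus LeakyReLU(0.2) and stores (activation > 0) for each branch output.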
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 9
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, None)
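# Compiled MIRB3 forward: materializes every weight-norm weight via the Triton
# kernels above, dispatches the convolutions to extern_kernels.convolution, and
# returns the residual output together with the buffers saved for backward.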
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57, primals_58
) = args
args.clear()
assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1))
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_4, (18,), (1,))
assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (18,), (1,))
assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_10, (9,), (1,))
assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_13, (18,), (1,))
assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_16, (18,), (1,))
assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_19, (9,), (1,))
assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_22, (18,), (1,))
assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_25, (18,), (1,))
assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_28, (9,), (1,))
assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_31, (18,), (1,))
assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_34, (18,), (1,))
assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_37, (9,), (1,))
assert_size_stride(primals_38, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_40, (18,), (1,))
assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_43, (18,), (1,))
assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_46, (9,), (1,))
assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_49, (18,), (1,))
assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_52, (18,), (1,))
assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_55, (9,), (1,))
assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_58, (18,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_3, buf0,
18, XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_3,
primals_2, buf0, buf1, 108, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(294912)](buf3, primals_4,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_4
        buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf4
buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf5, primals_6,
primals_5, buf6, 18, 9, XBLOCK=32, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_2[grid(294912)](buf8, primals_7,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf10, primals_9,
primals_8, buf11, 9, 18, XBLOCK=1, num_warps=2, num_stages=1)
buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_12,
buf13, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_12,
primals_11, buf13, buf14, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf16 = buf15
del buf15
triton_poi_fused_convolution_2[grid(294912)](buf16, primals_13,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
        buf17 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf17
buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf18,
primals_15, primals_14, buf19, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1),
padding=(3, 3), dilation=(3, 3), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_2[grid(294912)](buf21, primals_16,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_16
buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf22
buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf23,
primals_18, primals_17, buf24, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf12, primals_10, buf25,
primals_19, buf26, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_21,
buf27, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_21,
primals_20, buf27, buf28, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf30 = buf29
del buf29
triton_poi_fused_convolution_2[grid(294912)](buf30, primals_22,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_22
        buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf31
buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf32,
primals_24, primals_23, buf33, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf35 = buf34
del buf34
triton_poi_fused_convolution_2[grid(294912)](buf35, primals_25,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_25
buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf36
buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf37,
primals_27, primals_26, buf38, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_30,
buf40, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_30,
primals_29, buf40, buf41, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf42 = extern_kernels.convolution(buf26, buf41, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf43 = buf42
del buf42
triton_poi_fused_convolution_2[grid(294912)](buf43, primals_31,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_31
        buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf44
buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf45,
primals_33, primals_32, buf46, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1),
padding=(3, 3), dilation=(3, 3), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf48 = buf47
del buf47
triton_poi_fused_convolution_2[grid(294912)](buf48, primals_34,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_34
buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf49
buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf50,
primals_36, primals_35, buf51, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf39, primals_28, buf52,
primals_37, buf53, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_39,
buf54, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_39,
primals_38, buf54, buf55, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf57 = buf56
del buf56
triton_poi_fused_convolution_2[grid(294912)](buf57, primals_40,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_40
        buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf58
buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf59,
primals_42, primals_41, buf60, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf62 = buf61
del buf61
triton_poi_fused_convolution_2[grid(294912)](buf62, primals_43,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf63
buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf64,
primals_45, primals_44, buf65, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_48,
buf67, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_48,
primals_47, buf67, buf68, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf70 = buf69
del buf69
triton_poi_fused_convolution_2[grid(294912)](buf70, primals_49,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_49
        buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf71
buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf72,
primals_51, primals_50, buf73, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1),
padding=(3, 3), dilation=(3, 3), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf75 = buf74
del buf74
triton_poi_fused_convolution_2[grid(294912)](buf75, primals_52,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_52
buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf76
buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf77,
primals_54, primals_53, buf78, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf66, primals_46, buf79,
primals_55, buf80, 294912, XBLOCK=512, num_warps=8, num_stages=1)
        buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf81
        buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_6[grid(18)](buf82,
primals_57, primals_56, buf83, 18, 18, XBLOCK=32, num_warps=8,
num_stages=1)
buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf85 = buf84
del buf84
triton_poi_fused_add_convolution_7[grid(294912)](buf85, primals_58,
primals_1, 294912, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_58
buf86 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf79, primals_55, buf86, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf79
del primals_55
buf87 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf66, primals_46, buf87, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf66
del primals_46
buf88 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf52, primals_37, buf88, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf52
del primals_37
buf89 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf39, primals_28, buf89, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf39
del primals_28
buf90 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf25, primals_19, buf90, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf25
del primals_19
buf91 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf12, primals_10, buf91, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf12
del primals_10
return (buf85, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33,
buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73,
buf78, buf83, primals_1, primals_2, primals_3, primals_5, primals_6,
primals_8, primals_9, primals_11, primals_12, primals_14,
primals_15, primals_17, primals_18, primals_20, primals_21,
primals_23, primals_24, primals_26, primals_27, primals_29,
primals_30, primals_32, primals_33, primals_35, primals_36,
primals_38, primals_39, primals_41, primals_42, primals_44,
primals_45, primals_47, primals_48, primals_50, primals_51,
primals_53, primals_54, primals_56, primals_57, buf0, buf1, buf3,
buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19,
buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33,
buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48,
buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62,
buf64, buf65, buf67, buf68, buf70, buf72, buf73, buf75, buf77,
buf78, buf80, buf82, buf83, buf86, buf87, buf88, buf89, buf90, buf91)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, groups=3):
super(ConvBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=1, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class ConvBlockD(nn.Module):
def __init__(self, in_channels, out_channels, groups=3, ker_size=2):
super(ConvBlockD, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=ker_size, dilation=ker_size, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class MIRB3New(nn.Module):
def __init__(self, args):
super(MIRB3New, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
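    # Repacks each sub-module's weight-norm parameters (weight_g / weight_v) and
    # biases into the positional primals list consumed by the compiled call();
    # the Triton path recomputes the normalized weights itself.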
def forward(self, input_0):
primals_4 = self.conv3_1.group_conv.bias
primals_2 = self.conv3_1.group_conv.weight_g
primals_3 = self.conv3_1.group_conv.weight_v
primals_7 = self.conv3_1.depth_conv.bias
primals_5 = self.conv3_1.depth_conv.weight_g
primals_6 = self.conv3_1.depth_conv.weight_v
primals_10 = self.conv3_1.point_conv.bias
primals_8 = self.conv3_1.point_conv.weight_g
primals_9 = self.conv3_1.point_conv.weight_v
primals_13 = self.convd_1.group_conv.bias
primals_11 = self.convd_1.group_conv.weight_g
primals_12 = self.convd_1.group_conv.weight_v
primals_16 = self.convd_1.depth_conv.bias
primals_14 = self.convd_1.depth_conv.weight_g
primals_15 = self.convd_1.depth_conv.weight_v
primals_19 = self.convd_1.point_conv.bias
primals_17 = self.convd_1.point_conv.weight_g
primals_18 = self.convd_1.point_conv.weight_v
primals_22 = self.conv3_2.group_conv.bias
primals_20 = self.conv3_2.group_conv.weight_g
primals_21 = self.conv3_2.group_conv.weight_v
primals_25 = self.conv3_2.depth_conv.bias
primals_23 = self.conv3_2.depth_conv.weight_g
primals_24 = self.conv3_2.depth_conv.weight_v
primals_28 = self.conv3_2.point_conv.bias
primals_26 = self.conv3_2.point_conv.weight_g
primals_27 = self.conv3_2.point_conv.weight_v
primals_31 = self.convd_2.group_conv.bias
primals_29 = self.convd_2.group_conv.weight_g
primals_30 = self.convd_2.group_conv.weight_v
primals_34 = self.convd_2.depth_conv.bias
primals_32 = self.convd_2.depth_conv.weight_g
primals_33 = self.convd_2.depth_conv.weight_v
primals_37 = self.convd_2.point_conv.bias
primals_35 = self.convd_2.point_conv.weight_g
primals_36 = self.convd_2.point_conv.weight_v
primals_40 = self.conv3_3.group_conv.bias
primals_38 = self.conv3_3.group_conv.weight_g
primals_39 = self.conv3_3.group_conv.weight_v
primals_43 = self.conv3_3.depth_conv.bias
primals_41 = self.conv3_3.depth_conv.weight_g
primals_42 = self.conv3_3.depth_conv.weight_v
primals_46 = self.conv3_3.point_conv.bias
primals_44 = self.conv3_3.point_conv.weight_g
primals_45 = self.conv3_3.point_conv.weight_v
primals_49 = self.convd_3.group_conv.bias
primals_47 = self.convd_3.group_conv.weight_g
primals_48 = self.convd_3.group_conv.weight_v
primals_52 = self.convd_3.depth_conv.bias
primals_50 = self.convd_3.depth_conv.weight_g
primals_51 = self.convd_3.depth_conv.weight_v
primals_55 = self.convd_3.point_conv.bias
primals_53 = self.convd_3.point_conv.weight_g
primals_54 = self.convd_3.point_conv.weight_v
primals_58 = self.conv_last.bias
primals_56 = self.conv_last.weight_g
primals_57 = self.conv_last.weight_v
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58])
return output[0]
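# Usage sketch (an illustration under assumptions, not part of the generated
# file: it presumes the eager MIRB3 definition and _mock_config are in scope
# and that CUDA is available). The compiled module should match the eager
# block numerically when both share the same parameters:
#   cfg = _mock_config(n_feats=18)
#   x = torch.rand(4, 18, 64, 64, device='cuda')
#   eager, compiled = MIRB3(cfg).cuda(), MIRB3New(cfg).cuda()
#   compiled.load_state_dict(eager.state_dict())
#   torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)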
| wwjfsfs/wwjyyds | MIRB3 | false | 13189 | ["MIT"] | 0 | 80cd6267fde7cd98838078a0d5178a557ceb7414 | https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414 |
Pointer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ku/ckutxoa3tdkp4vgpaf6cdwo3umpfmhw4aepnimgnqqvfrbw2wcgq.py
# Topologically Sorted Source Nodes: [X1, X2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# X1 => cat
# X2 => cat_1
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_3], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr2 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp6 & xmask, other=0.0)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x3), tmp10, xmask)
tl.store(out_ptr1 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/lj/cljrqgtjvj3sotyhumpepmt4by4ntzixml6oyfutn3hxxwv4cfyj.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, Y1, mul_2, Y2], Original ATen: [aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# Y1 => add
# Y2 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %primals_5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_5), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -1e+30), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %primals_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_1), kwargs = {})
triton_poi_fused_add_mul_rsub_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp8 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = -1e+30
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp9 + tmp6
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 8, 1), (8, 1, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (1, 8, 1), (8, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [X1, X2], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, buf1, 128, grid=grid(128), stream=stream0)
del primals_1
del primals_2
del primals_3
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 4), (4, 4, 1))
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf1, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 4), (4, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub, mul_1, Y1, mul_2, Y2], Original ATen: [aten.mul, aten.rsub, aten.add]
triton_poi_fused_add_mul_rsub_1.run(buf2, primals_5, buf4, buf3, buf5, 64, grid=grid(64), stream=stream0)
del buf2
del buf4
return (buf3, buf5, primals_4, primals_5, primals_6, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 8, 1), (8, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 8, 1), (8, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def mask_logits(target, mask):
mask = mask.type(torch.float32)
return target * mask + (1 - mask) * -1e+30
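# Illustrative sketch (not part of the original module; helper name is
# hypothetical): mask_logits pushes masked-out positions toward -1e30 so a
# downstream softmax assigns them (near-)zero probability.
def _mask_logits_demo():
    logits = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
    mask = torch.tensor([[1.0, 1.0, 0.0, 0.0]])
    # Masked entries become ~-1e30, so softmax weight concentrates on the
    # first two positions.
    return torch.softmax(mask_logits(logits, mask), dim=-1)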
class Initialized_Conv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, groups=1, relu=False, bias=False):
super().__init__()
self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride
=stride, padding=padding, groups=groups, bias=bias)
if relu is True:
self.relu = True
nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu')
else:
self.relu = False
nn.init.xavier_uniform_(self.out.weight)
def forward(self, x):
if self.relu is True:
return F.relu(self.out(x))
else:
return self.out(x)
class Pointer(nn.Module):
def __init__(self, d_model):
super().__init__()
self.w1 = Initialized_Conv1d(d_model * 2, 1)
self.w2 = Initialized_Conv1d(d_model * 2, 1)
def forward(self, M1, M2, M3, mask):
X1 = torch.cat([M1, M2], dim=1)
X2 = torch.cat([M1, M3], dim=1)
Y1 = mask_logits(self.w1(X1).squeeze(), mask)
Y2 = mask_logits(self.w2(X2).squeeze(), mask)
return Y1, Y2
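# Usage sketch (assumed shapes, not from the original repo; helper name is
# hypothetical): the two heads produce start/end span logits over the
# sequence, with padded positions masked out via mask_logits.
def _pointer_demo():
    pointer = Pointer(d_model=4)
    M1 = torch.rand(2, 4, 6)
    M2 = torch.rand(2, 4, 6)
    M3 = torch.rand(2, 4, 6)
    mask = torch.ones(2, 6)
    Y1, Y2 = pointer(M1, M2, M3, mask)
    return Y1.shape, Y2.shape  # both torch.Size([2, 6])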
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask,
other=0.0)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x3, tmp10, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = -1e+30
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp9 + tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 8, 1), (8, 1, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (1, 8, 1), (8, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, primals_3,
buf0, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
del primals_3
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 4), (4, 4, 1))
buf4 = extern_kernels.convolution(buf1, primals_6, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 4), (4, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_1[grid(64)](buf2, primals_5, buf4,
buf3, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del buf4
return buf3, buf5, primals_4, primals_5, primals_6, buf0, buf1
def mask_logits(target, mask):
mask = mask.type(torch.float32)
return target * mask + (1 - mask) * -1e+30
class Initialized_Conv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, groups=1, relu=False, bias=False):
super().__init__()
self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride
=stride, padding=padding, groups=groups, bias=bias)
if relu is True:
self.relu = True
nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu')
else:
self.relu = False
nn.init.xavier_uniform_(self.out.weight)
def forward(self, x):
if self.relu is True:
return F.relu(self.out(x))
else:
return self.out(x)
class PointerNew(nn.Module):
def __init__(self, d_model):
super().__init__()
self.w1 = Initialized_Conv1d(d_model * 2, 1)
self.w2 = Initialized_Conv1d(d_model * 2, 1)
def forward(self, input_0, input_1, input_2, input_3):
primals_4 = self.w1.out.weight
primals_6 = self.w2.out.weight
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
primals_5 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
| timgianitsos/squad | Pointer | false | 13,190 | [
"MIT"
]
| 0 | 6ab502652e3528cfeeddfb8eba05221443a35294 | https://github.com/timgianitsos/squad/tree/6ab502652e3528cfeeddfb8eba05221443a35294 |
FSPool | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/sq/csqnktw2c6csqvramful4czuhd4tfhsf7zzplpqu3jc57nt7k4un.py
# Topologically Sorted Source Nodes: [idx], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# idx => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%expand_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.3333333333333333
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = triton_helpers.minimum(tmp3, tmp4)
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/54/c545voox7nqjpyh5darlb7rliu5ct4le7sbuf5ib4s3jexz6n6p5.py
# Topologically Sorted Source Nodes: [add, clamp_2], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# add => add_1
# clamp_2 => clamp_max_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 4), kwargs = {})
triton_poi_fused_add_clamp_1 = async_compile.triton('triton_poi_fused_add_clamp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.3333333333333333
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = triton_helpers.minimum(tmp3, tmp4)
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/gs/cgsfgtmwmypqp3ikbovp5il44p5divoynxsgh6otuoctepklpra7.py
# Topologically Sorted Source Nodes: [sub_2, mul_3, x, sort], Original ATen: [aten.rsub, aten.mul, aten.add, aten.sort]
# Source node to ATen node mapping:
# mul_3 => mul_5
# sort => sort
# sub_2 => sub_3
# x => add_3
# Graph fragment:
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %expand), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, -99999), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul_5), kwargs = {})
# %sort : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%add_3, 2, True), kwargs = {})
triton_per_fused_add_mul_rsub_sort_2 = async_compile.triton('triton_per_fused_add_mul_rsub_sort_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i16', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_rsub_sort_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_rsub_sort_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (16*x1)), xmask, other=0.0)
tmp1 = x0
tmp2 = tmp1.to(tl.float32)
tmp3 = 0.3333333333333333
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 <= tmp5
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = -99999.0
tmp10 = tmp8 * tmp9
tmp11 = tmp0 + tmp10
tmp12 = r2
tmp13 = tmp12.to(tl.int16)
tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp15 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16, tmp17, = triton_helpers.sort_with_index(tmp14, tmp15, None, 1, stable=False, descending=True)
tl.store(out_ptr0 + (x0 + (4*r2) + (16*x1)), tmp16, xmask)
tl.store(out_ptr1 + (x0 + (4*r2) + (16*x1)), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/z6/cz6uxyl4hlqby3opvlrc4o6lo4rqjfvt6ydda35keaemenyd33h6.py
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
# Source node to ATen node mapping:
# sort => getitem_1
# Graph fragment:
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%sort, 1), kwargs = {})
triton_poi_fused_sort_3 = async_compile.triton('triton_poi_fused_sort_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i16', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sort_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sort_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0.to(tl.int64)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/xf/cxfggmzhgy2wrwmfgl7sd6us2bld52d5ohta45ocem3ah7qhzdxm.py
# Topologically Sorted Source Nodes: [frac, left, right, sub_1, mul_1, mul_2, weight_2, mul_4, mul_5], Original ATen: [aten.frac, aten.gather, aten.rsub, aten.mul, aten.add]
# Source node to ATen node mapping:
# frac => abs_1, floor, mul_2, sign, sub_1
# left => gather
# mul_1 => mul_3
# mul_2 => mul_4
# mul_4 => mul_6
# mul_5 => mul_7
# right => gather_1
# sub_1 => sub_2
# weight_2 => add_2
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%expand_2,), kwargs = {})
# %floor : [num_users=1] = call_function[target=torch.ops.aten.floor.default](args = (%abs_1,), kwargs = {})
# %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%expand_2,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%floor, %sign), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand_2, %mul_2), kwargs = {})
# %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%expand_1, 2, %convert_element_type_5), kwargs = {})
# %gather_1 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%expand_1, 2, %clamp_max_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sub_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %gather), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %gather_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %add_2), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %expand), kwargs = {})
triton_poi_fused_add_frac_gather_mul_rsub_4 = async_compile.triton('triton_poi_fused_add_frac_gather_mul_rsub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_frac_gather_mul_rsub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_frac_gather_mul_rsub_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = x0
tmp2 = tmp1.to(tl.float32)
tmp3 = 0.3333333333333333
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 4.0
tmp8 = tmp6 * tmp7
tmp9 = tl_math.abs(tmp8)
tmp10 = libdevice.floor(tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = tmp11 < tmp8
tmp13 = tmp12.to(tl.int8)
tmp14 = tmp8 < tmp11
tmp15 = tmp14.to(tl.int8)
tmp16 = tmp13 - tmp15
tmp17 = tmp16.to(tmp8.dtype)
tmp18 = tmp10 * tmp17
tmp19 = tmp8 - tmp18
tmp20 = tmp5 - tmp19
tmp21 = tmp8.to(tl.int32)
tmp22 = tl.load(in_ptr1 + (tmp21 + (5*x1)), xmask, eviction_policy='evict_last')
tmp23 = tmp20 * tmp22
tmp24 = tl.full([1], 1, tl.int64)
tmp25 = tmp21 + tmp24
tmp26 = tl.full([1], 4, tl.int64)
tmp27 = triton_helpers.minimum(tmp25, tmp26)
tmp28 = tl.load(in_ptr1 + (tmp27 + (5*x1)), xmask, eviction_policy='evict_last')
tmp29 = tmp19 * tmp28
tmp30 = tmp23 + tmp29
tmp31 = tmp0 * tmp30
tmp32 = tmp4 <= tmp5
tmp33 = tmp32.to(tl.float32)
tmp34 = tmp31 * tmp33
tl.store(out_ptr0 + (x3), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/xx/cxxuau3xolwyk2hkjk27ygqr4crjwdi3te4o7deouwlnjqiewsyc.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# x_2 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [2]), kwargs = {})
triton_poi_fused_sum_5 = async_compile.triton('triton_poi_fused_sum_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 5), (5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [idx], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [add, clamp_2], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_1.run(buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16)
# Topologically Sorted Source Nodes: [sub_2, mul_3, x, sort], Original ATen: [aten.rsub, aten.mul, aten.add, aten.sort]
triton_per_fused_add_mul_rsub_sort_2.run(primals_1, buf2, buf3, 64, 4, grid=grid(64), stream=stream0)
del primals_1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
triton_poi_fused_sort_3.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [frac, left, right, sub_1, mul_1, mul_2, weight_2, mul_4, mul_5], Original ATen: [aten.frac, aten.gather, aten.rsub, aten.mul, aten.add]
triton_poi_fused_add_frac_gather_mul_rsub_4.run(buf2, primals_2, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.sum]
triton_poi_fused_sum_5.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf5
return (buf6, buf4, buf0, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 5), (5, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
def deterministic_sort(s, tau):
"""
"Stochastic Optimization of Sorting Networks via Continuous Relaxations" https://openreview.net/forum?id=H1eSS3CcKX
Aditya Grover, Eric Wang, Aaron Zweig, Stefano Ermon
s: input elements to be sorted. Shape: batch_size x n x 1
tau: temperature for relaxation. Scalar.
"""
n = s.size()[1]
one = torch.ones((n, 1), dtype=torch.float32, device=s.device)
A_s = torch.abs(s - s.permute(0, 2, 1))
B = torch.matmul(A_s, torch.matmul(one, one.transpose(0, 1)))
scaling = (n + 1 - 2 * (torch.arange(n, device=s.device) + 1)).type(torch
.float32)
C = torch.matmul(s, scaling.unsqueeze(0))
P_max = (C - B).permute(0, 2, 1)
sm = torch.nn.Softmax(-1)
P_hat = sm(P_max / tau)
return P_hat
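# Illustrative sketch (helper name is hypothetical): at a small temperature
# the relaxed permutation P_hat is close to a hard permutation matrix, so
# P_hat @ s approximates s sorted in descending order.
def _deterministic_sort_demo():
    s = torch.tensor([[[0.3], [0.9], [0.1]]])
    P_hat = deterministic_sort(s, tau=0.01)
    return P_hat.matmul(s)  # approximately [[[0.9], [0.3], [0.1]]]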
def cont_sort(x, perm=None, temp=1):
""" Helper function that calls deterministic_sort with the right shape.
Since it assumes a shape of (batch_size, n, 1) while the input x is of shape (batch_size, channels, n),
we can get this to the right shape by merging the first two dimensions.
If an existing perm is passed in, we compute the "inverse" (transpose of perm) and just use that to unsort x.
"""
original_size = x.size()
x = x.view(-1, x.size(2), 1)
if perm is None:
perm = deterministic_sort(x, temp)
else:
perm = perm.transpose(1, 2)
x = perm.matmul(x)
x = x.view(original_size)
return x, perm
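# Round-trip sketch (assumption, helper name is hypothetical): sorting with
# the relaxed permutation and then passing the same perm back approximately
# restores the input, since P_hat is near-orthogonal at low temperature.
def _cont_sort_demo():
    x = torch.rand(2, 3, 5)  # (batch, channels, n)
    x_sorted, perm = cont_sort(x, temp=0.01)
    x_restored, _ = cont_sort(x_sorted, perm=perm)
    return torch.allclose(x, x_restored, atol=0.1)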
def fill_sizes(sizes, x=None):
"""
sizes is a LongTensor of size [batch_size], containing the set sizes.
Each set size n is turned into [0/(n-1), 1/(n-1), ..., (n-2)/(n-1), 1, 0, 0, ..., 0, 0].
    These are the ratios r at which f is evaluated.
The 0s at the end are there for padding to the largest n in the batch.
If the input set x is passed in, it guarantees that the mask is the correct size even when sizes.max()
    is less than x.size(2), which can be the case if there is at least one padding element in each set in the batch.
"""
if x is not None:
max_size = x.size(2)
else:
max_size = sizes.max()
size_tensor = sizes.new(sizes.size(0), max_size).float().fill_(-1)
size_tensor = torch.arange(end=max_size, device=sizes.device, dtype=
torch.float32)
size_tensor = size_tensor.unsqueeze(0) / (sizes.float() - 1).clamp(min=1
).unsqueeze(1)
mask = size_tensor <= 1
mask = mask.unsqueeze(1)
return size_tensor.clamp(max=1), mask.float()
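# Worked example (hypothetical helper, not from the original repo): set
# sizes [3, 2] padded to length 4 give ratios [0, 0.5, 1, 1] and
# [0, 1, 1, 1], with the mask marking the first 3 (resp. 2) positions as
# real elements.
def _fill_sizes_demo():
    sizes = torch.tensor([3, 2])
    x = torch.zeros(2, 1, 4)
    ratios, mask = fill_sizes(sizes, x)
    return ratios, mask  # ratios: (2, 4); mask: (2, 1, 4) of 0./1. values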
class FSPool(nn.Module):
"""
Featurewise sort pooling. From:
FSPool: Learning Set Representations with Featurewise Sort Pooling.
Yan Zhang, Jonathon Hare, Adam Prügel-Bennett
https://arxiv.org/abs/1906.02795
https://github.com/Cyanogenoid/fspool
"""
def __init__(self, in_channels, n_pieces, relaxed=False):
"""
in_channels: Number of channels in input
n_pieces: Number of pieces in piecewise linear
relaxed: Use sorting networks relaxation instead of traditional sorting
"""
super().__init__()
self.n_pieces = n_pieces
self.weight = nn.Parameter(torch.zeros(in_channels, n_pieces + 1))
self.relaxed = relaxed
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.weight)
def forward(self, x, n=None):
""" FSPool
x: FloatTensor of shape (batch_size, in_channels, set size).
This should contain the features of the elements in the set.
Variable set sizes should be padded to the maximum set size in the batch with 0s.
n: LongTensor of shape (batch_size).
This tensor contains the sizes of each set in the batch.
        If not specified, assumes that every set has the same size, namely x.size(2).
Note that n.max() should never be greater than x.size(2), i.e. the specified set size in the
n tensor must not be greater than the number of elements stored in the x tensor.
Returns: pooled input x, used permutation matrix perm
"""
assert x.size(1) == self.weight.size(0
), 'incorrect number of input channels in weight'
if n is None:
n = x.new(x.size(0)).fill_(x.size(2)).long()
sizes, mask = fill_sizes(n, x)
mask = mask.expand_as(x)
weight = self.determine_weight(sizes)
x = x + (1 - mask).float() * -99999
if self.relaxed:
x, perm = cont_sort(x, temp=self.relaxed)
else:
x, perm = x.sort(dim=2, descending=True)
x = (x * weight * mask.float()).sum(dim=2)
return x, perm
def forward_transpose(self, x, perm, n=None):
""" FSUnpool
x: FloatTensor of shape (batch_size, in_channels)
perm: Permutation matrix returned by forward function.
        n: LongTensor of shape (batch_size)
"""
if n is None:
n = x.new(x.size(0)).fill_(perm.size(2)).long()
sizes, mask = fill_sizes(n)
mask = mask.expand(mask.size(0), x.size(1), mask.size(2))
weight = self.determine_weight(sizes)
x = x.unsqueeze(2) * weight * mask.float()
if self.relaxed:
x, _ = cont_sort(x, perm)
else:
x = x.scatter(2, perm, x)
return x, mask
def determine_weight(self, sizes):
"""
Piecewise linear function. Evaluates f at the ratios in sizes.
This should be a faster implementation than doing the sum over max terms, since we know that most terms in it are 0.
"""
weight = self.weight.unsqueeze(0)
weight = weight.expand(sizes.size(0), weight.size(1), weight.size(2))
index = self.n_pieces * sizes
index = index.unsqueeze(1)
index = index.expand(index.size(0), weight.size(1), index.size(2))
idx = index.long()
frac = index.frac()
left = weight.gather(2, idx)
right = weight.gather(2, (idx + 1).clamp(max=self.n_pieces))
return (1 - frac) * left + frac * right
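# Usage sketch (assumed shapes, not from the original repo; helper name is
# hypothetical): pool two sets with 3 channels, padded to 5 elements, with
# true sizes [5, 3]. determine_weight evaluates the learned piecewise
# linear function at the per-set ratios internally.
def _fspool_demo():
    pool = FSPool(in_channels=3, n_pieces=4)
    x = torch.rand(2, 3, 5)
    n = torch.tensor([5, 3])
    pooled, perm = pool(x, n)
    return pooled.shape  # torch.Size([2, 3]); perm holds the sort indices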
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'n_pieces': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.3333333333333333
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = triton_helpers.minimum(tmp3, tmp4)
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.3333333333333333
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = triton_helpers.minimum(tmp3, tmp4)
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_per_fused_add_mul_rsub_sort_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 16 * x1), xmask, other=0.0)
tmp1 = x0
tmp2 = tmp1.to(tl.float32)
tmp3 = 0.3333333333333333
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 <= tmp5
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 - tmp7
tmp9 = -99999.0
tmp10 = tmp8 * tmp9
tmp11 = tmp0 + tmp10
tmp12 = r2
tmp13 = tmp12.to(tl.int16)
tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp15 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16, tmp17 = triton_helpers.sort_with_index(tmp14, tmp15, None, 1,
stable=False, descending=True)
tl.store(out_ptr0 + (x0 + 4 * r2 + 16 * x1), tmp16, xmask)
tl.store(out_ptr1 + (x0 + 4 * r2 + 16 * x1), tmp17, xmask)
@triton.jit
def triton_poi_fused_sort_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.int64)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_add_frac_gather_mul_rsub_4(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = x0
tmp2 = tmp1.to(tl.float32)
tmp3 = 0.3333333333333333
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 4.0
tmp8 = tmp6 * tmp7
tmp9 = tl_math.abs(tmp8)
tmp10 = libdevice.floor(tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = tmp11 < tmp8
tmp13 = tmp12.to(tl.int8)
tmp14 = tmp8 < tmp11
tmp15 = tmp14.to(tl.int8)
tmp16 = tmp13 - tmp15
tmp17 = tmp16.to(tmp8.dtype)
tmp18 = tmp10 * tmp17
tmp19 = tmp8 - tmp18
tmp20 = tmp5 - tmp19
tmp21 = tmp8.to(tl.int32)
tmp22 = tl.load(in_ptr1 + (tmp21 + 5 * x1), xmask, eviction_policy=
'evict_last')
tmp23 = tmp20 * tmp22
tmp24 = tl.full([1], 1, tl.int64)
tmp25 = tmp21 + tmp24
tmp26 = tl.full([1], 4, tl.int64)
tmp27 = triton_helpers.minimum(tmp25, tmp26)
tmp28 = tl.load(in_ptr1 + (tmp27 + 5 * x1), xmask, eviction_policy=
'evict_last')
tmp29 = tmp19 * tmp28
tmp30 = tmp23 + tmp29
tmp31 = tmp0 * tmp30
tmp32 = tmp4 <= tmp5
tmp33 = tmp32.to(tl.float32)
tmp34 = tmp31 * tmp33
tl.store(out_ptr0 + x3, tmp34, xmask)
@triton.jit
def triton_poi_fused_sum_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 5), (5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(64)](buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
triton_poi_fused_add_clamp_1[grid(64)](buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16)
triton_per_fused_add_mul_rsub_sort_2[grid(64)](primals_1, buf2,
buf3, 64, 4, XBLOCK=8, num_warps=2, num_stages=1)
del primals_1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
triton_poi_fused_sort_3[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_frac_gather_mul_rsub_4[grid(256)](buf2,
primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_sum_5[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
return buf6, buf4, buf0, buf1, buf2
def deterministic_sort(s, tau):
"""
"Stochastic Optimization of Sorting Networks via Continuous Relaxations" https://openreview.net/forum?id=H1eSS3CcKX
Aditya Grover, Eric Wang, Aaron Zweig, Stefano Ermon
s: input elements to be sorted. Shape: batch_size x n x 1
tau: temperature for relaxation. Scalar.
"""
n = s.size()[1]
one = torch.ones((n, 1), dtype=torch.float32, device=s.device)
A_s = torch.abs(s - s.permute(0, 2, 1))
B = torch.matmul(A_s, torch.matmul(one, one.transpose(0, 1)))
scaling = (n + 1 - 2 * (torch.arange(n, device=s.device) + 1)).type(torch
.float32)
C = torch.matmul(s, scaling.unsqueeze(0))
P_max = (C - B).permute(0, 2, 1)
sm = torch.nn.Softmax(-1)
P_hat = sm(P_max / tau)
return P_hat
def cont_sort(x, perm=None, temp=1):
""" Helper function that calls deterministic_sort with the right shape.
Since it assumes a shape of (batch_size, n, 1) while the input x is of shape (batch_size, channels, n),
we can get this to the right shape by merging the first two dimensions.
If an existing perm is passed in, we compute the "inverse" (transpose of perm) and just use that to unsort x.
"""
original_size = x.size()
x = x.view(-1, x.size(2), 1)
if perm is None:
perm = deterministic_sort(x, temp)
else:
perm = perm.transpose(1, 2)
x = perm.matmul(x)
x = x.view(original_size)
return x, perm
def fill_sizes(sizes, x=None):
"""
sizes is a LongTensor of size [batch_size], containing the set sizes.
Each set size n is turned into [0/(n-1), 1/(n-1), ..., (n-2)/(n-1), 1, 0, 0, ..., 0, 0].
    These are the ratios r at which f is evaluated.
The 0s at the end are there for padding to the largest n in the batch.
If the input set x is passed in, it guarantees that the mask is the correct size even when sizes.max()
    is less than x.size(2), which can be the case if there is at least one padding element in each set in the batch.
"""
if x is not None:
max_size = x.size(2)
else:
max_size = sizes.max()
size_tensor = sizes.new(sizes.size(0), max_size).float().fill_(-1)
size_tensor = torch.arange(end=max_size, device=sizes.device, dtype=
torch.float32)
size_tensor = size_tensor.unsqueeze(0) / (sizes.float() - 1).clamp(min=1
).unsqueeze(1)
mask = size_tensor <= 1
mask = mask.unsqueeze(1)
return size_tensor.clamp(max=1), mask.float()
class FSPoolNew(nn.Module):
"""
Featurewise sort pooling. From:
FSPool: Learning Set Representations with Featurewise Sort Pooling.
Yan Zhang, Jonathon Hare, Adam Prügel-Bennett
https://arxiv.org/abs/1906.02795
https://github.com/Cyanogenoid/fspool
"""
def __init__(self, in_channels, n_pieces, relaxed=False):
"""
in_channels: Number of channels in input
n_pieces: Number of pieces in piecewise linear
relaxed: Use sorting networks relaxation instead of traditional sorting
"""
super().__init__()
self.n_pieces = n_pieces
self.weight = nn.Parameter(torch.zeros(in_channels, n_pieces + 1))
self.relaxed = relaxed
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.weight)
def forward_transpose(self, x, perm, n=None):
""" FSUnpool
x: FloatTensor of shape (batch_size, in_channels)
perm: Permutation matrix returned by forward function.
        n: LongTensor of shape (batch_size)
"""
if n is None:
n = x.new(x.size(0)).fill_(perm.size(2)).long()
sizes, mask = fill_sizes(n)
mask = mask.expand(mask.size(0), x.size(1), mask.size(2))
weight = self.determine_weight(sizes)
x = x.unsqueeze(2) * weight * mask.float()
if self.relaxed:
x, _ = cont_sort(x, perm)
else:
x = x.scatter(2, perm, x)
return x, mask
def determine_weight(self, sizes):
"""
Piecewise linear function. Evaluates f at the ratios in sizes.
This should be a faster implementation than doing the sum over max terms, since we know that most terms in it are 0.
"""
weight = self.weight.unsqueeze(0)
weight = weight.expand(sizes.size(0), weight.size(1), weight.size(2))
index = self.n_pieces * sizes
index = index.unsqueeze(1)
index = index.expand(index.size(0), weight.size(1), index.size(2))
idx = index.long()
frac = index.frac()
left = weight.gather(2, idx)
right = weight.gather(2, (idx + 1).clamp(max=self.n_pieces))
return (1 - frac) * left + frac * right
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
| zzirnheld/dspn | FSPool | false | 13,191 | [
"MIT"
]
| 0 | e0c248d9e55821847841cf0c67e97225277a6e75 | https://github.com/zzirnheld/dspn/tree/e0c248d9e55821847841cf0c67e97225277a6e75 |
LipschitzCube | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/fh/cfhy3agarxy4nw2x57myddkk4vpnhtk5yjzrljts6cbbw3pyiwgb.py
# Topologically Sorted Source Nodes: [ge, sub, mul, le, add, mul_1, add_1, gt, lt, mul_2, pow_1, mul_3, truediv, add_2], Original ATen: [aten.ge, aten.sub, aten.mul, aten.le, aten.add, aten.gt, aten.lt, aten.pow, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# ge => ge
# gt => gt
# le => le
# lt => lt
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# sub => sub
# truediv => div
# Graph fragment:
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg0_1, 1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0.6666666666666666), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%ge, %sub), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%arg0_1, -1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 0.6666666666666666), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%le, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, -1), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%arg0_1, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%gt, %lt), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %pow_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, 3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %div), kwargs = {})
triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 >= tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = 0.6666666666666666
tmp5 = tmp0 - tmp4
tmp6 = tmp3 * tmp5
tmp7 = -1.0
tmp8 = tmp0 <= tmp7
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp0 + tmp4
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp13 = tmp0 > tmp7
tmp14 = tmp0 < tmp1
tmp15 = tmp13 & tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp0 * tmp0
tmp18 = tmp17 * tmp0
tmp19 = tmp16 * tmp18
tmp20 = 0.3333333333333333
tmp21 = tmp19 * tmp20
tmp22 = tmp12 + tmp21
tl.store(out_ptr0 + (x0), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ge, sub, mul, le, add, mul_1, add_1, gt, lt, mul_2, pow_1, mul_3, truediv, add_2], Original ATen: [aten.ge, aten.sub, aten.mul, aten.le, aten.add, aten.gt, aten.lt, aten.pow, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LipschitzCube(nn.Module):
def forward(self, x):
return (x >= 1) * (x - 2 / 3) + (x <= -1) * (x + 2 / 3) + (x > -1) * (x
< 1) * x ** 3 / 3
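# Sanity-check sketch (hypothetical helper, not from the original repo): the
# three pieces agree at x = +/-1 (e.g. 1 - 2/3 == 1**3 / 3 == 1/3), so the
# map is continuous and 1-Lipschitz, with |f'(x)| = x**2 <= 1 on (-1, 1)
# and f'(x) = 1 outside.
def _lipschitz_cube_demo():
    f = LipschitzCube()
    x = torch.tensor([-2.0, -1.0, 0.0, 1.0, 2.0])
    return f(x)  # tensor([-4/3, -1/3, 0., 1/3, 4/3])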
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 >= tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = 0.6666666666666666
tmp5 = tmp0 - tmp4
tmp6 = tmp3 * tmp5
tmp7 = -1.0
tmp8 = tmp0 <= tmp7
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp0 + tmp4
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp13 = tmp0 > tmp7
tmp14 = tmp0 < tmp1
tmp15 = tmp13 & tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp0 * tmp0
tmp18 = tmp17 * tmp0
tmp19 = tmp16 * tmp18
tmp20 = 0.3333333333333333
tmp21 = tmp19 * tmp20
tmp22 = tmp12 + tmp21
tl.store(out_ptr0 + x0, tmp22, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class LipschitzCubeNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| zxydi1992/residual-flows | LipschitzCube | false | 13,192 | [
"MIT"
]
| 0 | 4ec289681dc91cff5312b22f7ebed93838b440fb | https://github.com/zxydi1992/residual-flows/tree/4ec289681dc91cff5312b22f7ebed93838b440fb |
ResNetBlockGroupNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/td/ctdaujsqsbqqkdml3zhbs4v3z7besknlwcxtbauuyvjzywc5gc5r.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# out_1 => add, add_1, mul_1, rsqrt, var_mean
# out_2 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_per_fused_native_group_norm_relu_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + (64*x0)), tmp29, xmask)
tl.store(out_ptr3 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
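# Annotation (not generated output): a hedged eager-mode reading of the kernel
# above. Each program normalizes one (batch, group) slice of 64 values, then
# applies the per-channel affine (in_ptr1/in_ptr2 indexed by r3 = rindex // 16)
# and ReLU:
#     mu  = x.mean(); inv = torch.rsqrt(x.var(unbiased=False) + 1e-05)
#     out = torch.relu((x - mu) * inv * weight + bias)
# out_ptr0 and out_ptr3 keep the per-slice mean and rsqrt, which the group-norm
# backward reuses.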
# kernel path: runs/run_shard_9/inductor_cache/iq/ciqnrtevtsjssfgzkfzct3qv5ylmetx724xrd7venf47fwwkkvw7.py
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.native_group_norm, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_4 => add_2, add_3, mul_3, rsqrt_1, var_mean_1
# out_5 => add_4
# out_6 => relu_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_8), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_per_fused_add_native_group_norm_relu_threshold_backward_1 = async_compile.triton('triton_per_fused_add_native_group_norm_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_native_group_norm_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr2 + (r1 + (64*x0)), tmp31, xmask)
tl.store(out_ptr3 + (r1 + (64*x0)), tmp33, xmask)
tl.store(out_ptr4 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
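# Annotation (not generated output): besides the activations, this kernel also
# writes a boolean mask (out_ptr3, i.e. relu_out <= 0) that aten.threshold_backward
# uses to zero upstream gradients where the final ReLU was inactive; buf12 in
# call() below is that mask.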
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0.run(buf0, primals_3, primals_4, buf1, buf5, buf4, 4, 64, grid=grid(4), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.native_group_norm, aten.add, aten.relu, aten.threshold_backward]
triton_per_fused_add_native_group_norm_relu_threshold_backward_1.run(buf6, primals_6, primals_7, primals_1, buf7, buf11, buf12, buf10, 4, 64, grid=grid(4), stream=stream0)
del primals_7
return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6, buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf5, buf6, reinterpret_tensor(buf7, (4, 1), (1, 1), 0), reinterpret_tensor(buf10, (4, 1), (1, 1), 0), buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class ResNetBlockGroupNorm(nn.Module):
def __init__(self, inplanes, planes, num_groups, stride=1, activation=
'relu'):
super(ResNetBlockGroupNorm, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.conv1 = conv3x3(inplanes, planes, stride)
self.gn1 = nn.GroupNorm(num_groups, planes)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.conv2 = conv3x3(planes, planes)
self.gn2 = nn.GroupNorm(num_groups, planes)
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(nn.Conv2d(inplanes, planes,
kernel_size=1, stride=stride, bias=False), nn.GroupNorm(
num_groups, planes))
self.downsample = downsample
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.gn1.weight, 1.0)
nn.init.constant_(self.gn1.bias, 0.0)
nn.init.constant_(self.gn2.weight, 1.0)
nn.init.constant_(self.gn2.bias, 0.0)
if self.downsample is not None:
assert isinstance(self.downsample[1], nn.GroupNorm)
nn.init.constant_(self.downsample[1].weight, 1.0)
nn.init.constant_(self.downsample[1].bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
return self(x)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.gn1(out)
out = self.activation(out)
out = self.conv2(out)
out = self.gn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.activation(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4, 'num_groups': 1}]
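# Hedged smoke test (illustrative, not part of the exported record): with
# stride=1 and inplanes == planes the downsample branch is skipped, so the
# residual add keeps the input shape.
_block = ResNetBlockGroupNorm(inplanes=4, planes=4, num_groups=1)
assert _block(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4, 4)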
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr2 + (r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr3 + (r1 + 64 * x0), tmp33, xmask)
tl.store(out_ptr4 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(4)](buf0, primals_3,
primals_4, buf1, buf5, buf4, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_4
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_add_native_group_norm_relu_threshold_backward_1[grid
(4)](buf6, primals_6, primals_7, primals_1, buf7, buf11, buf12,
buf10, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del primals_7
return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6,
buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0),
reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf5, buf6,
reinterpret_tensor(buf7, (4, 1), (1, 1), 0), reinterpret_tensor(
buf10, (4, 1), (1, 1), 0), buf12)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class ResNetBlockGroupNormNew(nn.Module):
def __init__(self, inplanes, planes, num_groups, stride=1, activation=
'relu'):
super(ResNetBlockGroupNormNew, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.conv1 = conv3x3(inplanes, planes, stride)
self.gn1 = nn.GroupNorm(num_groups, planes)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.conv2 = conv3x3(planes, planes)
self.gn2 = nn.GroupNorm(num_groups, planes)
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(nn.Conv2d(inplanes, planes,
kernel_size=1, stride=stride, bias=False), nn.GroupNorm(
num_groups, planes))
self.downsample = downsample
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.gn1.weight, 1.0)
nn.init.constant_(self.gn1.bias, 0.0)
nn.init.constant_(self.gn2.weight, 1.0)
nn.init.constant_(self.gn2.bias, 0.0)
if self.downsample is not None:
assert isinstance(self.downsample[1], nn.GroupNorm)
nn.init.constant_(self.downsample[1].weight, 1.0)
nn.init.constant_(self.downsample[1].bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
return self(x)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.gn1.weight
primals_4 = self.gn1.bias
primals_5 = self.conv2.weight
primals_6 = self.gn2.weight
primals_7 = self.gn2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
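# Hedged equivalence check (illustrative, not part of the exported record;
# needs a CUDA device since call() pins cuda:0): the compiled wrapper should
# match an eager forward composed from its own submodules.
_new = ResNetBlockGroupNormNew(4, 4, 1).cuda()
_x = torch.rand(4, 4, 4, 4, device='cuda')
_eager = _new.activation(_new.gn2(_new.conv2(_new.activation(_new.gn1(
    _new.conv1(_x))))) + _x)
torch.testing.assert_close(_new(_x), _eager, rtol=1e-4, atol=1e-4)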
| wp03052/wolf | ResNetBlockGroupNorm | false | 13,193 | ["Apache-2.0"] | 0 | 49a582cafb829a2642db360c7d94c21439247ec7 | https://github.com/wp03052/wolf/tree/49a582cafb829a2642db360c7d94c21439247ec7 |
DeResNetBlockGroupNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/td/ctdaujsqsbqqkdml3zhbs4v3z7besknlwcxtbauuyvjzywc5gc5r.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# out_1 => add, add_1, mul_1, rsqrt, var_mean
# out_2 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_per_fused_native_group_norm_relu_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + (64*x0)), tmp29, xmask)
tl.store(out_ptr3 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/iq/ciqnrtevtsjssfgzkfzct3qv5ylmetx724xrd7venf47fwwkkvw7.py
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.native_group_norm, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_4 => add_2, add_3, mul_3, rsqrt_1, var_mean_1
# out_5 => add_4
# out_6 => relu_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_8), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_per_fused_add_native_group_norm_relu_threshold_backward_1 = async_compile.triton('triton_per_fused_add_native_group_norm_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_native_group_norm_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr2 + (r1 + (64*x0)), tmp31, xmask)
tl.store(out_ptr3 + (r1 + (64*x0)), tmp33, xmask)
tl.store(out_ptr4 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0.run(buf0, primals_3, primals_4, buf1, buf5, buf4, 4, 64, grid=grid(4), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.native_group_norm, aten.add, aten.relu, aten.threshold_backward]
triton_per_fused_add_native_group_norm_relu_threshold_backward_1.run(buf6, primals_6, primals_7, primals_1, buf7, buf11, buf12, buf10, 4, 64, grid=grid(4), stream=stream0)
del primals_7
return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6, buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf5, buf6, reinterpret_tensor(buf7, (4, 1), (1, 1), 0), reinterpret_tensor(buf10, (4, 1), (1, 1), 0), buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def deconv3x3(in_planes, out_planes, stride=1, output_padding=0):
"""3x3 deconvolution with padding"""
return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3, stride=
stride, padding=1, output_padding=output_padding, bias=False)
class DeResNetBlockGroupNorm(nn.Module):
def __init__(self, inplanes, planes, num_groups, stride=1,
output_padding=0, activation='relu'):
super(DeResNetBlockGroupNorm, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.deconv1 = deconv3x3(inplanes, planes, stride, output_padding)
self.gn1 = nn.GroupNorm(num_groups, planes)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.deconv2 = deconv3x3(planes, planes)
self.gn2 = nn.GroupNorm(num_groups, planes)
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(nn.ConvTranspose2d(inplanes, planes,
kernel_size=1, stride=stride, output_padding=output_padding,
bias=False), nn.GroupNorm(num_groups, planes))
self.downsample = downsample
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.gn1.weight, 1.0)
nn.init.constant_(self.gn1.bias, 0.0)
nn.init.constant_(self.gn2.weight, 1.0)
nn.init.constant_(self.gn2.bias, 0.0)
if self.downsample is not None:
assert isinstance(self.downsample[1], nn.GroupNorm)
nn.init.constant_(self.downsample[1].weight, 1.0)
nn.init.constant_(self.downsample[1].bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
return self(x)
def forward(self, x):
residual = x
out = self.deconv1(x)
out = self.gn1(out)
out = self.activation(out)
out = self.deconv2(out)
out = self.gn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.activation(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4, 'num_groups': 1}]
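# Hedged shape check (illustrative, not part of the exported record): with
# stride=2 and output_padding=1 the transposed 3x3 convolutions upsample
# 4x4 -> 8x8, following H_out = (H_in - 1)*stride - 2*padding + kernel_size
# + output_padding (dilation 1), and the 1x1 "downsample" branch keeps the
# residual aligned with the upsampled output.
_up = DeResNetBlockGroupNorm(4, 4, num_groups=1, stride=2, output_padding=1)
assert _up(torch.rand(4, 4, 4, 4)).shape == (4, 4, 8, 8)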
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr2 + (r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr3 + (r1 + 64 * x0), tmp33, xmask)
tl.store(out_ptr4 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(4)](buf0, primals_3,
primals_4, buf1, buf5, buf4, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_4
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_add_native_group_norm_relu_threshold_backward_1[grid
(4)](buf6, primals_6, primals_7, primals_1, buf7, buf11, buf12,
buf10, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del primals_7
return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6,
buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0),
reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf5, buf6,
reinterpret_tensor(buf7, (4, 1), (1, 1), 0), reinterpret_tensor(
buf10, (4, 1), (1, 1), 0), buf12)
def deconv3x3(in_planes, out_planes, stride=1, output_padding=0):
"""3x3 deconvolution with padding"""
return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3, stride=
stride, padding=1, output_padding=output_padding, bias=False)
class DeResNetBlockGroupNormNew(nn.Module):
def __init__(self, inplanes, planes, num_groups, stride=1,
output_padding=0, activation='relu'):
super(DeResNetBlockGroupNormNew, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.deconv1 = deconv3x3(inplanes, planes, stride, output_padding)
self.gn1 = nn.GroupNorm(num_groups, planes)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.deconv2 = deconv3x3(planes, planes)
self.gn2 = nn.GroupNorm(num_groups, planes)
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(nn.ConvTranspose2d(inplanes, planes,
kernel_size=1, stride=stride, output_padding=output_padding,
bias=False), nn.GroupNorm(num_groups, planes))
self.downsample = downsample
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.gn1.weight, 1.0)
nn.init.constant_(self.gn1.bias, 0.0)
nn.init.constant_(self.gn2.weight, 1.0)
nn.init.constant_(self.gn2.bias, 0.0)
if self.downsample is not None:
assert isinstance(self.downsample[1], nn.GroupNorm)
nn.init.constant_(self.downsample[1].weight, 1.0)
nn.init.constant_(self.downsample[1].bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
return self(x)
def forward(self, input_0):
primals_2 = self.deconv1.weight
primals_3 = self.gn1.weight
primals_4 = self.gn1.bias
primals_5 = self.deconv2.weight
primals_6 = self.gn2.weight
primals_7 = self.gn2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| wp03052/wolf | DeResNetBlockGroupNorm | false | 13,194 | ["Apache-2.0"] | 0 | 49a582cafb829a2642db360c7d94c21439247ec7 | https://github.com/wp03052/wolf/tree/49a582cafb829a2642db360c7d94c21439247ec7 |
FullSort | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/zu/czuykkrx3bycxkuqvpdouv5j3rcauet5ovtjh3346eg2mc7xjxpx.py
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
# Source node to ATen node mapping:
# sort => sort
# Graph fragment:
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%arg0_1, 1), kwargs = {})
triton_per_fused_sort_0 = async_compile.triton('triton_per_fused_sort_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0)
tmp1 = r2
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5, tmp6, = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=False)
tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp5, xmask)
''', device_str='cuda')
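# Annotation (not generated output): xindex enumerates the 64 (batch, h, w)
# positions (x0 = spatial offset, x1 = batch), while the RBLOCK axis holds the
# 4 values along dim 1, so sort_with_index sorts each channel fiber in
# registers; the companion indices (tmp6) are discarded because only the
# sorted values are returned.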
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort]
stream0 = get_raw_stream(0)
triton_per_fused_sort_0.run(arg0_1, buf0, 64, 4, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FullSort(nn.Module):
def forward(self, x):
return torch.sort(x, 1)[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
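# Hedged property check (illustrative, not part of the exported record):
# sorting along dim 1 permutes values within each channel fiber, so the module
# agrees with torch.sort and preserves per-fiber norms (one reason such
# activations appear in Lipschitz-constrained networks).
_x = torch.rand(4, 4, 4, 4)
assert torch.equal(FullSort()(_x), _x.sort(dim=1).values)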
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = r2
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5, _tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1,
stable=False, descending=False)
tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_sort_0[grid(64)](arg0_1, buf0, 64, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
return buf0,
class FullSortNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| zxydi1992/residual-flows | FullSort | false | 13,195 | ["MIT"] | 0 | 4ec289681dc91cff5312b22f7ebed93838b440fb | https://github.com/zxydi1992/residual-flows/tree/4ec289681dc91cff5312b22f7ebed93838b440fb |
CNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/oh/cohhzozgklcdr3g2cpdmnac2zvbvmk53smneafef4zekz5p2kieu.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
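# Annotation (not generated output): the convolution itself runs as a separate
# extern kernel in the call() wrapper; this pointwise kernel only fuses the
# bias add with ReLU. x1 selects one of the 8 output channels, each spanning
# 4096 (= 64*64) contiguous elements.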
# kernel path: runs/run_shard_9/inductor_cache/5z/c5z4ugnz2tz6etzq5gfjssvgajfzqbzizudkvgjbmhjkyjb3kwgl.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
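# Editorial note: the kernel above is a 2x2, stride-2 max pool that also records
# which of the four window positions won, encoded as int8 offsets in {0, 1, 2, 3}
# (0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right). A rough
# eager-mode sketch (illustrative only; the name is hypothetical), assuming `x`
# has shape (N, C, H, W) with even H and W:
def _reference_maxpool2x2_with_offsets(x):
    n, c, h, w = x.shape
    windows = x.reshape(n, c, h // 2, 2, w // 2, 2).permute(0, 1, 2, 4, 3, 5)
    windows = windows.reshape(n, c, h // 2, w // 2, 4)  # flatten each 2x2 window
    values, offsets = windows.max(dim=-1)               # max and argmax per window
    return values, offsets.to(torch.int8)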
# kernel path: runs/run_shard_9/inductor_cache/tk/ctkr5j63ngqpsdf5zpn24uwouxiguv7jseip5x6trqjye5gti4rn.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ec/cecaw7x564jfwuidwdxtkrmanoelm5tnnaxaobkxqxnnoobdi5sf.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/zx/czxc6cytpnsc7fjujp5mqi6z4mwxiye7lpbvfu7ihxjqftq4fzx4.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_8 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
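# Editorial note: here the linear layer was lowered as a bare extern mm, with the
# bias peeled off into this kernel so that bias-add and ReLU fuse into a single
# pointwise pass. A minimal eager-mode sketch (illustrative only; the name is
# hypothetical), assuming `mm_out` has shape (16, 64) and `bias` shape (64,):
def _reference_linear_bias_relu(mm_out, bias):
    # bias broadcasts across the 16 rows
    return torch.relu(mm_out + bias)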
# kernel path: runs/run_shard_9/inductor_cache/ty/ctyfxp64fjwmz4naxc3duvshputqp2vdlyxkf2l7dpsdumkwmdpe.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_10 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/jr/cjra477igxtot4bhv5bzdyliqhrsl24s254uusqxosnd22fcfkwp.py
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# x_12 => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_6 = async_compile.triton('triton_per_fused__log_softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
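# Editorial note: the persistent-reduction kernel above is a row-wise, numerically
# stable log-softmax. An equivalent eager-mode sketch (illustrative only; the name
# is hypothetical) for an input of shape (16, 10):
def _reference_log_softmax(x):
    shifted = x - x.max(dim=1, keepdim=True).values  # subtract row max for stability
    return shifted - shifted.exp().sum(dim=1, keepdim=True).log()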
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (8, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (64, 1024), (1024, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (32, 64), (64, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (10, 32), (32, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 131072, grid=grid(131072), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 65536, grid=grid(65536), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 16384, grid=grid(16384), stream=stream0)
buf8 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0), reinterpret_tensor(primals_6, (1024, 64), (1, 1024), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_7, 1024, grid=grid(1024), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (64, 32), (1, 64), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 512, grid=grid(512), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (32, 10), (1, 32), 0), alpha=1, beta=1, out=buf12)
del primals_11
buf15 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_6.run(buf12, buf15, 16, 10, grid=grid(16), stream=stream0)
del buf12
return (buf15, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0), buf9, buf11, buf15, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3,
padding=1)
self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=
3, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(8 * 8 * 16, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.pool(x)
x = self.conv2(x)
x = F.relu(x)
x = self.pool(x)
x = x.view(-1, 8 * 8 * 16)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
        x = F.log_softmax(x, dim=1)  # explicit dim; implicit-dim log_softmax is deprecated
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
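# Editorial note: a hypothetical smoke test wiring get_inputs() into the eager
# model above (illustrative only; guarded so it never runs on import). Note that
# the view(-1, 8 * 8 * 16) reshape turns the batch of 4 images into 16 rows:
if __name__ == "__main__":
    model = CNN()
    out = model(get_inputs()[0])
    print(out.shape)  # expected: torch.Size([16, 10])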
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (8, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (64, 1024), (1024, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (32, 64), (64, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (10, 32), (32, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(131072)](buf1, primals_2,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(32768)](buf1, buf2,
buf3, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(65536)](buf5, primals_5,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf5, buf6,
buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0
), reinterpret_tensor(primals_6, (1024, 64), (1, 1024), 0), out
=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(1024)](buf9, primals_7, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (64, 32), (1,
64), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(512)](buf11, primals_9, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (32, 10), (1, 32), 0), alpha=1, beta=1, out=buf12)
del primals_11
buf15 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_6[grid(16)](buf12, buf15, 16, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf12
return (buf15, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0), buf9,
buf11, buf15, primals_10, primals_8, primals_6)
class CNNNew(nn.Module):
def __init__(self):
super(CNNNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3,
padding=1)
self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=
3, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(8 * 8 * 16, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
 | zzzzzkjs/quick_draw_clone | CNN | false | 13,196 | ["MIT"] | 0 | a80d4c03b4cb88e31ae8e143d4042b37cdacc38e | https://github.com/zzzzzkjs/quick_draw_clone/tree/a80d4c03b4cb88e31ae8e143d4042b37cdacc38e |
CQAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/gv/cgvyzvgb4s6skjl2lcdf54y4sqcmmvdkvv2hcpobs5hraiugivrp.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone, aten._unsafe_view]
# Source node to ATen node mapping:
# matmul => clone_2, view
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_2, [16, 4]), kwargs = {})
triton_poi_fused__unsafe_view_clone_0 = async_compile.triton('triton_poi_fused__unsafe_view_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + ((4*x1) + (16*(y0 // 4)) + (y0 % 4)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
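# Editorial note: this kernel materializes a permuted tensor contiguously so it
# can be flattened for a plain mm. An eager-mode sketch (illustrative only; the
# name is hypothetical) for a (4, 4, 4) input transposed on its last two dims:
def _reference_unsafe_view_clone(x):
    return x.permute(0, 2, 1).contiguous().view(16, 4)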
# kernel path: runs/run_shard_9/inductor_cache/pa/cpa2xlx5dvbxg7yen7xxu7aclanjppmhz3pxswu4bw5tkkqjh7rr.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %primals_5), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/uf/cufxi6e4u5mvvfd5sut4qxvbrhzb7usdtxtg3ze2t6ajs3np5275.py
# Topologically Sorted Source Nodes: [add, res, res_1, mul_1, sub, mul_2, add_2, mul_3, sub_1, mul_4, add_3], Original ATen: [aten.add, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_2 => add_3
# add_3 => add_4
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# res => add_1
# res_1 => add_2
# sub => sub
# sub_1 => sub_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %expand_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %bmm), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_6), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %primals_8), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_8), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -1e+30), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %primals_7), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_7), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, -1e+30), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
triton_poi_fused_add_mul_rsub_2 = async_compile.triton('triton_poi_fused_add_mul_rsub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4), xmask)
tmp5 = tl.load(in_ptr3 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr4 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr5 + (x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp4 + tmp6
tmp9 = tmp7 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp8
tmp12 = -1e+30
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp7 * tmp15
tmp17 = tmp10 - tmp15
tmp18 = tmp17 * tmp12
tmp19 = tmp16 + tmp18
tl.store(out_ptr0 + (x4), tmp14, xmask)
tl.store(out_ptr1 + (x4), tmp19, xmask)
''', device_str='cuda')
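# Editorial note: the fused kernel above assembles the similarity logits (row
# term + column term + bilinear term + scalar bias) and applies additive masking
# in both directions in one pass. The masking pattern, as a minimal eager-mode
# sketch (illustrative only; the name is hypothetical):
def _reference_masked_logits(s, mask, neg=-1e30):
    # masked positions get a large negative value so softmax drives them to ~0;
    # -1e30 is used instead of -inf to keep exp() finite and avoid NaNs
    return s * mask + (1.0 - mask) * neg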
# kernel path: runs/run_shard_9/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py
# Topologically Sorted Source Nodes: [S1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S1 => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_3, [2], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py
# Topologically Sorted Source Nodes: [S1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
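# Editorial note: inductor splits this softmax into the two pointwise kernels
# above: the first computes exp(x - rowmax), the second normalizes by the row
# sum. An equivalent eager-mode sketch over the last dim (illustrative only;
# the name is hypothetical):
def _reference_softmax_lastdim(x):
    e = (x - x.max(dim=-1, keepdim=True).values).exp()
    return e / e.sum(dim=-1, keepdim=True)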
# kernel path: runs/run_shard_9/inductor_cache/5q/c5q2cwpxtjxxz7h6xna43qv2cdyea56heflv2ye7d7mtmtdm7twa.py
# Topologically Sorted Source Nodes: [S2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S2 => amax_1, exp_1, sub_3
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_4, [1], True), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/cg/ccg5e776j77ye72qtmo5nfcxjaz6zv34474xpm34f62r6hfxzo6g.py
# Topologically Sorted Source Nodes: [S2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S2 => div_1, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/pj/cpjglqinm2mgqqclt3c66vcfcnwohgtuw2thbq5rdw75hhx4fn5r.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%permute, %bmm_1, %mul_5, %mul_6], 2), kwargs = {})
triton_poi_fused_cat_7 = async_compile.triton('triton_poi_fused_cat_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16) % 4
x2 = (xindex // 64)
x3 = (xindex // 16)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1 + (4*x0) + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((4*x3) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x1 + (4*((-8) + x0)) + (16*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + ((4*x3) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = tmp0 >= tmp12
tmp21 = tl.full([1], 16, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tl.load(in_ptr0 + (x1 + (4*((-12) + x0)) + (16*x2)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr2 + ((4*x3) + ((-12) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp20, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp19, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + (x4), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_6, (1, ), (1, ))
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone, aten._unsafe_view]
stream0 = get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone, aten._unsafe_view]
triton_poi_fused__unsafe_view_clone_0.run(primals_2, buf2, 16, 4, grid=grid(16, 4), stream=stream0)
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(buf2, primals_4, out=buf3)
del primals_4
buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_1, primals_5, buf4, 64, grid=grid(64), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, subres2], Original ATen: [aten.mul, aten.bmm]
extern_kernels.bmm(buf4, primals_2, out=buf5)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, res, res_1, mul_1, sub, mul_2, add_2, mul_3, sub_1, mul_4, add_3], Original ATen: [aten.add, aten.mul, aten.rsub]
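        # This single fused kernel materializes the trilinear scores S
        # (buf1 + buf3 + buf5 + bias) and applies mask_logits twice, with
        # Qmask (primals_8) into buf6 and Cmask (primals_7) into buf9, the
        # inputs of the two softmax pipelines below.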
triton_poi_fused_add_mul_rsub_2.run(buf1, buf3, buf5, primals_6, primals_8, primals_7, buf6, buf9, 64, grid=grid(64), stream=stream0)
del buf1
del buf3
del primals_6
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [S1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [S1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf10 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [S2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [S2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf10, buf11, 64, grid=grid(64), stream=stream0)
buf12 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [A], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, reinterpret_tensor(buf11, (4, 4, 4), (16, 1, 4), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [B], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), out=buf14)
del buf13
buf15 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(primals_1, buf12, buf14, buf15, 256, grid=grid(256), stream=stream0)
del buf12
del buf14
return (reinterpret_tensor(buf15, (4, 16, 4), (64, 1, 16), 0), primals_7, primals_8, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), primals_2, buf8, buf11, reinterpret_tensor(buf2, (4, 16), (1, 4), 0), reinterpret_tensor(buf0, (4, 16), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def mask_logits(target, mask):
mask = mask.type(torch.float32)
return target * mask + (1 - mask) * -1e+30
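# Illustrative example (added): with target = torch.tensor([2.0, 3.0]) and
# mask = torch.tensor([1.0, 0.0]), mask_logits returns [2.0, -1e30], so a
# subsequent softmax assigns the masked position (numerically) zero weight.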
class CQAttention(nn.Module):
def __init__(self, d_model, dropout=0.1):
super().__init__()
w4C = torch.empty(d_model, 1)
w4Q = torch.empty(d_model, 1)
w4mlu = torch.empty(1, 1, d_model)
nn.init.xavier_uniform_(w4C)
nn.init.xavier_uniform_(w4Q)
nn.init.xavier_uniform_(w4mlu)
self.w4C = nn.Parameter(w4C)
self.w4Q = nn.Parameter(w4Q)
self.w4mlu = nn.Parameter(w4mlu)
bias = torch.empty(1)
nn.init.constant_(bias, 0)
self.bias = nn.Parameter(bias)
self.dropout = dropout
def forward(self, C, Q, Cmask, Qmask):
C = C.transpose(1, 2)
Q = Q.transpose(1, 2)
batch_size_c = C.size()[0]
_batch_size, Lc, _d_model = C.shape
_batch_size, Lq, _d_model = Q.shape
S = self.trilinear_for_attention(C, Q)
Cmask = Cmask.view(batch_size_c, Lc, 1)
Qmask = Qmask.view(batch_size_c, 1, Lq)
S1 = F.softmax(mask_logits(S, Qmask), dim=2)
S2 = F.softmax(mask_logits(S, Cmask), dim=1)
A = torch.bmm(S1, Q)
B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C)
out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2)
return out.transpose(1, 2)
def trilinear_for_attention(self, C, Q):
_batch_size, Lc, _d_model = C.shape
_batch_size, Lq, _d_model = Q.shape
dropout = self.dropout
C = F.dropout(C, p=dropout, training=self.training)
Q = F.dropout(Q, p=dropout, training=self.training)
subres0 = torch.matmul(C, self.w4C).expand([-1, -1, Lq])
        subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1])
subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2))
res = subres0 + subres1 + subres2
res += self.bias
return res
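# In index form (derived from trilinear_for_attention above):
#   S[b, i, j] = w4C . C[b, i] + w4Q . Q[b, j]
#                + w4mlu . (C[b, i] * Q[b, j]) + bias
# where "." is a dot product over d_model and "*" is elementwise; this is
# the trilinear similarity commonly used for context-query attention.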
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 1]), torch.rand([4, 1, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
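# Hedged smoke test (added for illustration; not part of the original repo).
# It exercises CQAttention with the shapes from get_inputs()/get_init_inputs().
if __name__ == '__main__':
    _init_args, _init_kwargs = get_init_inputs()
    _attn = CQAttention(*_init_args, **_init_kwargs)
    _out = _attn(*get_inputs())
    assert _out.shape == (4, 16, 4)  # (batch, 4 * d_model, Lc)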
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + x4, xmask)
tmp5 = tl.load(in_ptr3 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr4 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp4 + tmp6
tmp9 = tmp7 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp8
tmp12 = -1e+30
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp7 * tmp15
tmp17 = tmp10 - tmp15
tmp18 = tmp17 * tmp12
tmp19 = tmp16 + tmp18
tl.store(out_ptr0 + x4, tmp14, xmask)
tl.store(out_ptr1 + x4, tmp19, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
x3 = xindex // 16
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x1 + 4 * (-8 + x0) + 16 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (4 * x3 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp23 = tl.load(in_ptr0 + (x1 + 4 * (-12 + x0) + 16 * x2), tmp20 &
xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr2 + (4 * x3 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp20, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp19, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x4, tmp30, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0[grid(16, 4)](primals_1, buf0,
16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf0, primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_0[grid(16, 4)](primals_2, buf2,
16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf2, primals_4, out=buf3)
del primals_4
buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_mul_1[grid(64)](primals_1, primals_5, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf4, primals_2, out=buf5)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_2[grid(64)](buf1, buf3, buf5,
primals_6, primals_8, primals_7, buf6, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
del buf3
del primals_6
buf7 = buf5
del buf5
triton_poi_fused__softmax_3[grid(64)](buf6, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = buf7
del buf7
triton_poi_fused__softmax_5[grid(64)](buf9, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = buf9
del buf9
triton_poi_fused__softmax_6[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = buf10
del buf10
extern_kernels.bmm(buf8, reinterpret_tensor(primals_2, (4, 4, 4), (
16, 1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf8, reinterpret_tensor(buf11, (4, 4, 4), (16,
1, 4), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf13, reinterpret_tensor(primals_1, (4, 4, 4),
(16, 1, 4), 0), out=buf14)
del buf13
buf15 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_cat_7[grid(256)](primals_1, buf12, buf14, buf15,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf12
del buf14
return reinterpret_tensor(buf15, (4, 16, 4), (64, 1, 16), 0
), primals_7, primals_8, reinterpret_tensor(primals_1, (4, 4, 4), (
16, 1, 4), 0), primals_2, buf8, buf11, reinterpret_tensor(buf2, (4,
16), (1, 4), 0), reinterpret_tensor(buf0, (4, 16), (1, 4), 0)
def mask_logits(target, mask):
mask = mask.type(torch.float32)
return target * mask + (1 - mask) * -1e+30
class CQAttentionNew(nn.Module):
def __init__(self, d_model, dropout=0.1):
super().__init__()
w4C = torch.empty(d_model, 1)
w4Q = torch.empty(d_model, 1)
w4mlu = torch.empty(1, 1, d_model)
nn.init.xavier_uniform_(w4C)
nn.init.xavier_uniform_(w4Q)
nn.init.xavier_uniform_(w4mlu)
self.w4C = nn.Parameter(w4C)
self.w4Q = nn.Parameter(w4Q)
self.w4mlu = nn.Parameter(w4mlu)
bias = torch.empty(1)
nn.init.constant_(bias, 0)
self.bias = nn.Parameter(bias)
self.dropout = dropout
def trilinear_for_attention(self, C, Q):
_batch_size, Lc, _d_model = C.shape
_batch_size, Lq, _d_model = Q.shape
dropout = self.dropout
C = F.dropout(C, p=dropout, training=self.training)
Q = F.dropout(Q, p=dropout, training=self.training)
subres0 = torch.matmul(C, self.w4C).expand([-1, -1, Lq])
        subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1])
subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2))
res = subres0 + subres1 + subres2
res += self.bias
return res
def forward(self, input_0, input_1, input_2, input_3):
primals_3 = self.w4C
primals_4 = self.w4Q
primals_5 = self.w4mlu
primals_6 = self.bias
primals_1 = input_0
primals_2 = input_1
primals_7 = input_2
primals_8 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| timgianitsos/squad | CQAttention | false | 13197 | ["MIT"] | 0 | 6ab502652e3528cfeeddfb8eba05221443a35294 | https://github.com/timgianitsos/squad/tree/6ab502652e3528cfeeddfb8eba05221443a35294 |
LipNormConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/in/cinvfeqy6ry6tu24qtajhlnsldmnir4gax6ikx6vnnqb4avr5ien.py
# Topologically Sorted Source Nodes: [w_scale, truediv, sigmoid, weight], Original ATen: [aten.norm, aten.div, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# truediv => div
# w_scale => abs_1, pow_2, sum_1
# weight => mul
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%view,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, [1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %view_1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_2,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sigmoid), kwargs = {})
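# Eager-mode equivalent (from LipNormConv2d.compute_weight below):
# weight / w_scale * torch.sigmoid(scale), where w_scale is the
# per-output-channel l1 norm of the flattened filters; the norm reduction,
# division and sigmoid rescaling are fused into one persistent reduction.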
triton_per_fused_div_mul_norm_sigmoid_0 = async_compile.triton('triton_per_fused_div_mul_norm_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_norm_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_norm_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = tmp0 / tmp5
tmp8 = tl.sigmoid(tmp7)
tmp9 = tmp6 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr0 + (r1 + (64*x0)), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/2i/c2ia6clymej2axaxwh5dhlf5hhex6emmkbazo7542zo3gcyaffyw.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul, %primals_3, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
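# The convolution itself runs through extern_kernels.convolution with
# bias=None; this pointwise kernel then adds the per-channel bias in place
# over the (4, 4, 9, 9) output (x1 indexes the channel, 81 = 9 * 9).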
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 81) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [w_scale, truediv, sigmoid, weight], Original ATen: [aten.norm, aten.div, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_norm_sigmoid_0.run(buf1, primals_1, primals_2, buf2, 4, 64, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 9, 9), (324, 81, 9, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf4, primals_3, 1296, grid=grid(1296), stream=stream0)
del primals_3
return (buf4, primals_1, primals_2, primals_4, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def _max_except_dim(input, dim):
maxed = input
for axis in range(input.ndimension() - 1, dim, -1):
maxed, _ = maxed.max(axis, keepdim=True)
for axis in range(dim - 1, -1, -1):
maxed, _ = maxed.max(axis, keepdim=True)
return maxed
def _norm_except_dim(w, norm_type, dim):
if norm_type == 1 or norm_type == 2:
return torch.norm_except_dim(w, norm_type, dim)
elif norm_type == float('inf'):
return _max_except_dim(w, dim)
def operator_norm_settings(domain, codomain):
if domain == 1 and codomain == 1:
max_across_input_dims = True
norm_type = 1
elif domain == 1 and codomain == 2:
max_across_input_dims = True
norm_type = 2
elif domain == 1 and codomain == float('inf'):
max_across_input_dims = True
norm_type = float('inf')
elif domain == 2 and codomain == float('inf'):
max_across_input_dims = False
norm_type = 2
elif domain == float('inf') and codomain == float('inf'):
max_across_input_dims = False
norm_type = 1
else:
raise ValueError('Unknown combination of domain "{}" and codomain "{}"'
.format(domain, codomain))
return max_across_input_dims, norm_type
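# Background (added): these are the (domain, codomain) pairs for which the
# operator norm of a matrix has a closed form via row/column norms, e.g.
# l1 -> lp is the largest lp norm of a column, linf -> linf the largest l1
# norm of a row, and l2 -> linf the largest l2 norm of a row.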
def _logit(p):
p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p))
return torch.log(p + 1e-10) + torch.log(1 - p + 1e-10)
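# Note (added): the clamp keeps p in [0.1, 0.9] before the transform. As
# written, _logit returns log(p) + log(1 - p); the textbook logit (the exact
# inverse of the sigmoid applied in compute_weight) would be
# log(p) - log(1 - p). The expression is left verbatim from the source.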
class LipNormConv2d(nn.Conv2d):
"""Lipschitz constant defined using operator norms."""
    def __init__(self, in_channels, out_channels, kernel_size, stride,
        padding, bias=True, coeff=0.97, domain=float('inf'),
        codomain=float('inf'), local_constraint=True, **unused_kwargs):
del unused_kwargs
super(LipNormConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
        max_across_input_dims, self.norm_type = operator_norm_settings(
            self.domain, self.codomain)
self.max_across_dim = 1 if max_across_input_dims else 0
with torch.no_grad():
            w_scale = _norm_except_dim(self.weight, self.norm_type,
                dim=self.max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
self.scale = nn.Parameter(_logit(w_scale / self.coeff))
def compute_weight(self):
        w_scale = _norm_except_dim(self.weight, self.norm_type,
            dim=self.max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
return self.weight / w_scale * torch.sigmoid(self.scale)
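    # Note (added): with domain = codomain = inf, w_scale above is the
    # per-output-channel l1 norm of the filters, so each normalized row has
    # l1 norm sigmoid(scale) < 1, which bounds the layer's inf -> inf
    # operator norm below 1.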
def forward(self, input):
weight = self.compute_weight()
        return F.conv2d(input, weight, self.bias, self.stride, self.padding,
            dilation=1, groups=1)
def extra_repr(self):
s = super(LipNormConv2d, self).extra_repr()
        return s + ', coeff={}, domain={}, codomain={}, local={}'.format(
            self.coeff, self.domain, self.codomain, self.local_constraint)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1, 'padding': 4}]
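# Hedged smoke test (added for illustration; not part of the original repo).
if __name__ == '__main__':
    _init_args, _init_kwargs = get_init_inputs()
    _conv = LipNormConv2d(*_init_args, **_init_kwargs)
    _y = _conv(*get_inputs())
    assert _y.shape == (4, 4, 9, 9)  # matches the shape asserted in call()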
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mul_norm_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = tmp0 / tmp5
tmp8 = tl.sigmoid(tmp7)
tmp9 = tmp6 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_mul_norm_sigmoid_0[grid(4)](buf1, primals_1,
primals_2, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 9, 9), (324, 81, 9, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(1296)](buf4, primals_3, 1296,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf4, primals_1, primals_2, primals_4, reinterpret_tensor(buf1,
(4, 1, 1, 1), (1, 1, 1, 1), 0), buf2
def _max_except_dim(input, dim):
maxed = input
for axis in range(input.ndimension() - 1, dim, -1):
maxed, _ = maxed.max(axis, keepdim=True)
for axis in range(dim - 1, -1, -1):
maxed, _ = maxed.max(axis, keepdim=True)
return maxed
def _norm_except_dim(w, norm_type, dim):
if norm_type == 1 or norm_type == 2:
return torch.norm_except_dim(w, norm_type, dim)
elif norm_type == float('inf'):
return _max_except_dim(w, dim)
def operator_norm_settings(domain, codomain):
if domain == 1 and codomain == 1:
max_across_input_dims = True
norm_type = 1
elif domain == 1 and codomain == 2:
max_across_input_dims = True
norm_type = 2
elif domain == 1 and codomain == float('inf'):
max_across_input_dims = True
norm_type = float('inf')
elif domain == 2 and codomain == float('inf'):
max_across_input_dims = False
norm_type = 2
elif domain == float('inf') and codomain == float('inf'):
max_across_input_dims = False
norm_type = 1
else:
raise ValueError('Unknown combination of domain "{}" and codomain "{}"'
.format(domain, codomain))
return max_across_input_dims, norm_type
def _logit(p):
p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p))
return torch.log(p + 1e-10) + torch.log(1 - p + 1e-10)
class LipNormConv2dNew(nn.Conv2d):
"""Lipschitz constant defined using operator norms."""
    def __init__(self, in_channels, out_channels, kernel_size, stride,
        padding, bias=True, coeff=0.97, domain=float('inf'),
        codomain=float('inf'), local_constraint=True, **unused_kwargs):
del unused_kwargs
super(LipNormConv2dNew, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
        max_across_input_dims, self.norm_type = operator_norm_settings(
            self.domain, self.codomain)
self.max_across_dim = 1 if max_across_input_dims else 0
with torch.no_grad():
            w_scale = _norm_except_dim(self.weight, self.norm_type,
                dim=self.max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
self.scale = nn.Parameter(_logit(w_scale / self.coeff))
def compute_weight(self):
        w_scale = _norm_except_dim(self.weight, self.norm_type,
            dim=self.max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
return self.weight / w_scale * torch.sigmoid(self.scale)
def extra_repr(self):
s = super(LipNormConv2dNew, self).extra_repr()
        return s + ', coeff={}, domain={}, codomain={}, local={}'.format(
            self.coeff, self.domain, self.codomain, self.local_constraint)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = self.scale
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| zxydi1992/residual-flows | LipNormConv2d | false | 13198 | ["MIT"] | 0 | 4ec289681dc91cff5312b22f7ebed93838b440fb | https://github.com/zxydi1992/residual-flows/tree/4ec289681dc91cff5312b22f7ebed93838b440fb |
ConvStem2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/kv/ckvoe7jp43a75a4odusgovwuzznuq5y742qxyng6gpjrescbn2ul.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
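# This kernel evaluates the 3x3, stride-2, padding-1 max pool directly:
# out-of-bounds taps contribute -inf, and the int8 output records which of
# the nine window positions (0..8) produced the max, for the backward pass.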
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x3 = (xindex // 16)
x4 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-33) + (2*x0) + (64*x3)), tmp10, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-32) + (2*x0) + (64*x3)), tmp16, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x0)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-31) + (2*x0) + (64*x3)), tmp23, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (64*x3)), tmp30, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + ((2*x0) + (64*x3)), tmp33, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x3)), tmp36, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x1)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (31 + (2*x0) + (64*x3)), tmp43, eviction_policy='evict_last', other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x3)), tmp46, eviction_policy='evict_last', other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x3)), tmp49, eviction_policy='evict_last', other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x4), tmp51, None)
tl.store(out_ptr1 + (x4), tmp76, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf1 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
buf2 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(buf0, buf1, buf2, 65536, grid=grid(65536), stream=stream0)
return (buf1, primals_1, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ConvStem2(nn.Module):
def __init__(self, in_chans=3, out_chans=64, kernel_size=7, stride=2):
super(ConvStem2, self).__init__()
self.conv = nn.Conv2d(in_chans, out_chans, kernel_size=kernel_size,
stride=stride, padding=kernel_size // 2, bias=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.conv(x)
x = self.maxpool(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
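# Hedged shape check (added for illustration; not part of the original repo):
# conv (stride 2) then maxpool (stride 2) halve 64x64 twice, down to 16x16.
if __name__ == '__main__':
    _stem = ConvStem2(*get_init_inputs()[0], **get_init_inputs()[1])
    _y = _stem(*get_inputs())
    assert _y.shape == (4, 64, 16, 16)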
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x3 = xindex // 16
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-33 + 2 * x0 + 64 * x3), tmp10,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-32 + 2 * x0 + 64 * x3), tmp16,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-31 + 2 * x0 + 64 * x3), tmp23,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 64 * x3), tmp30,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 64 * x3), tmp33, eviction_policy=
'evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x3), tmp36,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (31 + 2 * x0 + 64 * x3), tmp43,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x3), tmp46,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x3), tmp49,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x4, tmp51, None)
tl.store(out_ptr1 + x4, tmp76, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2,
2), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf1 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
buf2 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.int8)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(65536)](buf0, buf1,
buf2, 65536, XBLOCK=256, num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf0, buf2
class ConvStem2New(nn.Module):
def __init__(self, in_chans=3, out_chans=64, kernel_size=7, stride=2):
super(ConvStem2New, self).__init__()
self.conv = nn.Conv2d(in_chans, out_chans, kernel_size=kernel_size,
stride=stride, padding=kernel_size // 2, bias=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| yoookoo/cnn-transformer | ConvStem2 | false | 13199 | ["Apache-2.0"] | 0 | 8ee54ea944ed752162e3098db7f8f689ec150efe | https://github.com/yoookoo/cnn-transformer/tree/8ee54ea944ed752162e3098db7f8f689ec150efe |
NICEMLPBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/zi/czi6taqk3yywywfl3iwbejutxysbxi6hrg6s2rrrevzoemnmagnw.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
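# Adds the layer bias, applies ReLU, and also records the (activation <= 0)
# mask that threshold_backward consumes in the backward pass; the graph
# fragment above suggests the same kernel pattern covers both hidden layers.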
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
# Source node to ATen node mapping:
# linear_1 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
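# This kernel materializes the (4, 4, 4, 4) -> (64, 4) reshape as a contiguous
# copy. For these shapes the index arithmetic collapses: `(x1 % 4) // 4` and
# the `// 16` term are always 0, so the load index reduces to `x0 + 4 * x1`,
# i.e. an identity copy into a freshly allocated 2-D buffer.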
# kernel path: runs/run_shard_9/inductor_cache/4a/c4aftmtnqwr7hu6cf5jwqf3p4keti6kfufy6ubsztgw7benvzzji.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_7, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused__weight_norm_interface_2 = async_compile.triton('triton_poi_fused__weight_norm_interface_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
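# Kernel 2 of the weight-norm pair: for each output row i of `weight_v`
# (primals_7) it computes the per-row L2 norm,
#   buf4[i] = sqrt(sum_j v[i, j] ** 2),
# i.e. the denominator of w = g * v / ||v||.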
# kernel path: runs/run_shard_9/inductor_cache/33/c33k5za2aswszitlq2wvmlb5z4qyqau47wey64xfk6mec52uns7s.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_6, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %div), kwargs = {})
triton_poi_fused__weight_norm_interface_3 = async_compile.triton('triton_poi_fused__weight_norm_interface_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
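# Kernel 3 finishes weight norm: w[i, j] = v[i, j] * (g[i] / ||v_i||), which
# matches `torch._weight_norm(v, g, dim=0)`. A one-line eager sketch (assumed
# equivalent for these shapes):
#   w = primals_7 * (primals_6 / primals_7.norm(dim=1, keepdim=True))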
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_2.run(primals_7, buf4, 4, grid=grid(4), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_3.run(primals_7, primals_6, buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf6, primals_5, buf9, 256, grid=grid(256), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf6, (64, 4), (4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf7, reinterpret_tensor(buf5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_8
return (reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf5, primals_6, primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf4, buf7, buf5, buf9, primals_4, buf10, )
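# End to end, `call` implements fc1 -> ReLU -> fc2 -> ReLU -> weight-normed
# fc3 on the (4, 4, 4, 4) input flattened to (64, 4): mm, fused bias+ReLU,
# copy-as-view, mm, the two weight-norm kernels, fused bias+ReLU,
# copy-as-view, and a final addmm that folds in fc3's bias.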
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LinearWeightNorm(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(LinearWeightNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.linear.in_features, self.linear.out_features,
            self.linear.bias is not None)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self(x).view(-1, self.linear.out_features)
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-06)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
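# `init` is the data-dependent initialization: run one batch, then rescale
# weight_g (and shift the bias) so the layer's initial outputs have mean ~0
# and standard deviation ~init_scale on that batch.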
class NICEMLPBlock(nn.Module):
def __init__(self, in_features, out_features, hidden_features, activation):
super(NICEMLPBlock, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
self.fc2 = nn.Linear(hidden_features, hidden_features, bias=True)
self.fc3 = LinearWeightNorm(hidden_features, out_features, bias=True)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.constant_(self.fc2.bias, 0.0)
def forward(self, x):
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3(out)
return out
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3.init(out, init_scale=0.0 * init_scale)
return out
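# NICEMLPBlock.init passes init_scale=0.0 * init_scale to fc3, i.e. the last
# layer is data-dependently initialized to (near-)zero output, a common choice
# for coupling layers in flows. Expected eager usage, matching the harness
# functions below (a sketch):
#   block = NICEMLPBlock(4, 4, 4, activation='relu')
#   y = block(torch.rand(4, 4, 4, 4))   # y.shape == (4, 4, 4, 4)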
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'hidden_features': 4,
'activation': 'relu'}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_2[grid(4)](primals_7, buf4,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_3[grid(16)](primals_7,
primals_6, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf6,
primals_5, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (64, 4), (4, 1), 0)
del buf6
extern_kernels.addmm(primals_8, buf7, reinterpret_tensor(buf5, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_8
return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf5, primals_6, primals_7, reinterpret_tensor(primals_3, (64, 4
), (4, 1), 0), buf2, buf4, buf7, buf5, buf9, primals_4, buf10
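# A plain-eager sketch of the compiled `call` above, handy for sanity checks
# (assumed equivalent for these shapes; not part of the generated artifact):
def _call_reference(w1, b1, x, w2, b2, g, v, b3):
    import torch.nn.functional as F
    h = F.relu(F.linear(x, w1, b1))
    h = F.relu(F.linear(h, w2, b2))
    w = v * (g / v.norm(dim=1, keepdim=True))  # weight norm with dim=0
    return F.linear(h, w, b3)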
class LinearWeightNorm(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(LinearWeightNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.linear.in_features, self.linear.out_features,
            self.linear.bias is not None)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self(x).view(-1, self.linear.out_features)
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-06)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
class NICEMLPBlockNew(nn.Module):
def __init__(self, in_features, out_features, hidden_features, activation):
super(NICEMLPBlockNew, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
self.fc2 = nn.Linear(hidden_features, hidden_features, bias=True)
self.fc3 = LinearWeightNorm(hidden_features, out_features, bias=True)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.constant_(self.fc2.bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3.init(out, init_scale=0.0 * init_scale)
return out
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_8 = self.fc3.linear.bias
primals_6 = self.fc3.linear.weight_g
primals_7 = self.fc3.linear.weight_v
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| wp03052/wolf | NICEMLPBlock | false | 13,200 | [
"Apache-2.0"
]
| 0 | 49a582cafb829a2642db360c7d94c21439247ec7 | https://github.com/wp03052/wolf/tree/49a582cafb829a2642db360c7d94c21439247ec7 |
LipNormLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/cs/ccs52a7zexgfgg42ljzuxkjmhqfue6bfcp5dftqxbw4sww5mifsx.py
# Topologically Sorted Source Nodes: [truediv, sigmoid, mul, weight], Original ATen: [aten.div, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# truediv => div
# weight => mul_1
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %view_1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 0.97), kwargs = {})
triton_poi_fused_div_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_div_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.abs(tmp1)
tmp4 = tl_math.abs(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.abs(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tmp0 / tmp11
tmp14 = tl.sigmoid(tmp13)
tmp15 = tmp12 * tmp14
tmp16 = 0.97
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
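# For domain=codomain=inf, operator_norm_settings picks norm_type=1 with
# max_across_input_dims=False, so each output row of `weight` is scaled by its
# own L1 norm over the input dim. The kernel fuses the whole
# reparameterization (assumed equivalent):
#   w[i, j] = weight[i, j] / sum_k |weight[i, k]| * sigmoid(scale[i]) * 0.97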
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, sigmoid, mul, weight], Original ATen: [aten.div, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_div_mul_sigmoid_0.run(primals_1, primals_2, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del buf0
del primals_3
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), )
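# `call` is just the reparameterized linear layer: build the L1-normalized,
# sigmoid-gated weight with the kernel above, then a single addmm applies
# F.linear to the input flattened from (4, 4, 4, 4) to (64, 4).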
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def _max_except_dim(input, dim):
maxed = input
for axis in range(input.ndimension() - 1, dim, -1):
maxed, _ = maxed.max(axis, keepdim=True)
for axis in range(dim - 1, -1, -1):
maxed, _ = maxed.max(axis, keepdim=True)
return maxed
def _norm_except_dim(w, norm_type, dim):
if norm_type == 1 or norm_type == 2:
return torch.norm_except_dim(w, norm_type, dim)
elif norm_type == float('inf'):
return _max_except_dim(w, dim)
def operator_norm_settings(domain, codomain):
if domain == 1 and codomain == 1:
max_across_input_dims = True
norm_type = 1
elif domain == 1 and codomain == 2:
max_across_input_dims = True
norm_type = 2
elif domain == 1 and codomain == float('inf'):
max_across_input_dims = True
norm_type = float('inf')
elif domain == 2 and codomain == float('inf'):
max_across_input_dims = False
norm_type = 2
elif domain == float('inf') and codomain == float('inf'):
max_across_input_dims = False
norm_type = 1
else:
raise ValueError('Unknown combination of domain "{}" and codomain "{}"'
.format(domain, codomain))
return max_across_input_dims, norm_type
def _logit(p):
    p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p))
    # logit(p) = log(p) - log(1 - p), the inverse of sigmoid; p is clamped
    # to [0.1, 0.9] for numerical stability.
    return torch.log(p + 1e-10) - torch.log(1 - p + 1e-10)
class LipNormLinear(nn.Linear):
"""Lipschitz constant defined using operator norms."""
def __init__(self, in_features, out_features, bias=True, coeff=0.97,
domain=float('inf'), codomain=float('inf'), local_constraint=True,
**unused_kwargs):
del unused_kwargs
super(LipNormLinear, self).__init__(in_features, out_features, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
max_across_input_dims, self.norm_type = operator_norm_settings(self
.domain, self.codomain)
self.max_across_dim = 1 if max_across_input_dims else 0
with torch.no_grad():
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=
self.max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
self.scale = nn.Parameter(_logit(w_scale / self.coeff))
def compute_weight(self):
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.
max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
return self.weight / w_scale * torch.sigmoid(self.scale) * self.coeff
def forward(self, input):
weight = self.compute_weight()
return F.linear(input, weight, self.bias)
def extra_repr(self):
s = super(LipNormLinear, self).extra_repr()
return s + ', coeff={}, domain={}, codomain={}, local={}'.format(self
.coeff, self.domain, self.codomain, self.local_constraint)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl_math.abs(tmp1)
tmp4 = tl_math.abs(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.abs(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tmp0 / tmp11
tmp14 = tl.sigmoid(tmp13)
tmp15 = tmp12 * tmp14
tmp16 = 0.97
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_sigmoid_0[grid(16)](primals_1, primals_2,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64,
4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del buf0
del primals_3
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_2, reinterpret_tensor(primals_4, (64, 4), (4,
1), 0)
def _max_except_dim(input, dim):
maxed = input
for axis in range(input.ndimension() - 1, dim, -1):
maxed, _ = maxed.max(axis, keepdim=True)
for axis in range(dim - 1, -1, -1):
maxed, _ = maxed.max(axis, keepdim=True)
return maxed
def _norm_except_dim(w, norm_type, dim):
if norm_type == 1 or norm_type == 2:
return torch.norm_except_dim(w, norm_type, dim)
elif norm_type == float('inf'):
return _max_except_dim(w, dim)
def operator_norm_settings(domain, codomain):
if domain == 1 and codomain == 1:
max_across_input_dims = True
norm_type = 1
elif domain == 1 and codomain == 2:
max_across_input_dims = True
norm_type = 2
elif domain == 1 and codomain == float('inf'):
max_across_input_dims = True
norm_type = float('inf')
elif domain == 2 and codomain == float('inf'):
max_across_input_dims = False
norm_type = 2
elif domain == float('inf') and codomain == float('inf'):
max_across_input_dims = False
norm_type = 1
else:
raise ValueError('Unknown combination of domain "{}" and codomain "{}"'
.format(domain, codomain))
return max_across_input_dims, norm_type
def _logit(p):
    p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p))
    # logit(p) = log(p) - log(1 - p), the inverse of sigmoid; p is clamped
    # to [0.1, 0.9] for numerical stability.
    return torch.log(p + 1e-10) - torch.log(1 - p + 1e-10)
class LipNormLinearNew(nn.Linear):
"""Lipschitz constant defined using operator norms."""
def __init__(self, in_features, out_features, bias=True, coeff=0.97,
domain=float('inf'), codomain=float('inf'), local_constraint=True,
**unused_kwargs):
del unused_kwargs
super(LipNormLinearNew, self).__init__(in_features, out_features, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
max_across_input_dims, self.norm_type = operator_norm_settings(self
.domain, self.codomain)
self.max_across_dim = 1 if max_across_input_dims else 0
with torch.no_grad():
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=
self.max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
self.scale = nn.Parameter(_logit(w_scale / self.coeff))
def compute_weight(self):
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.
max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
return self.weight / w_scale * torch.sigmoid(self.scale) * self.coeff
def extra_repr(self):
s = super(LipNormLinearNew, self).extra_repr()
return s + ', coeff={}, domain={}, codomain={}, local={}'.format(self
.coeff, self.domain, self.codomain, self.local_constraint)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = self.scale
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| zxydi1992/residual-flows | LipNormLinear | false | 13,201 | [
"MIT"
]
| 0 | 4ec289681dc91cff5312b22f7ebed93838b440fb | https://github.com/zxydi1992/residual-flows/tree/4ec289681dc91cff5312b22f7ebed93838b440fb |
FusedConvBN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/57/c57wdlov3bgaqzs3w4wazc2lo6kirw2c4evq4lg44z4f3geshi2f.py
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.sub, aten.div]
# Source node to ATen node mapping:
# autograd_function_apply => div_1, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %unsqueeze_2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %unsqueeze_5), kwargs = {})
triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 0.25
tmp9 = tmp7 * tmp8
tmp10 = tmp0 - tmp9
tmp11 = 4.0
tmp12 = tmp7 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp2 - tmp12
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp4 - tmp12
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp6 - tmp12
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 0.001
tmp28 = tmp26 + tmp27
tmp29 = tmp10 / tmp28
tl.store(out_ptr0 + (x2), tmp29, xmask)
''', device_str='cuda')
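# The kernel applies batch-norm normalization to the (4, 4, 1, 1) conv output:
# for each channel c it folds in the mean over the batch (sum * 0.25), the
# unbiased variance (sum of squared deviations / 3.0), and eps = 1e-3:
#   out[n, c] = (x[n, c] - mean_c) / (sqrt(var_c) + eps)
# Note eps is added to the std, not the variance, mirroring
# `denom = sqrt_var + eps` in FusedConvBN2DFunction.forward.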
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
del primals_1
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.sub, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sub_0.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
return (buf1, buf0, reinterpret_tensor(primals_2, (4, 4, 4, 4), (16, 64, 4, 1), 0), )
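# `call` runs the 4x4 convolution (valid padding, so the spatial dims collapse
# to 1x1) through the extern kernel, then the fused normalization above; the
# extra return values are the tensors the compiled backward needs.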
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def unsqueeze_all(t):
return t[None, :, None, None]
def batch_norm_backward(grad_out, X, sum, sqrt_var, N, eps):
tmp = ((X - unsqueeze_all(sum) / N) * grad_out).sum(dim=(0, 2, 3))
tmp *= -1
d_denom = tmp / (sqrt_var + eps) ** 2
d_var = d_denom / (2 * sqrt_var)
d_mean_dx = grad_out / unsqueeze_all(sqrt_var + eps)
d_mean_dx = unsqueeze_all(-d_mean_dx.sum(dim=(0, 2, 3)) / N)
grad_input = X * unsqueeze_all(d_var * N)
grad_input += unsqueeze_all(-d_var * sum)
grad_input *= 2 / ((N - 1) * N)
grad_input += d_mean_dx
grad_input *= unsqueeze_all(sqrt_var + eps)
grad_input += grad_out
grad_input /= unsqueeze_all(sqrt_var + eps)
return grad_input
def convolution_backward(grad_out, X, weight):
grad_input = F.conv2d(X.transpose(0, 1), grad_out.transpose(0, 1)
).transpose(0, 1)
grad_X = F.conv_transpose2d(grad_out, weight)
return grad_X, grad_input
class FusedConvBN2DFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, X, conv_weight, eps=0.001):
assert X.ndim == 4
ctx.save_for_backward(X, conv_weight)
X = F.conv2d(X, conv_weight)
sum = X.sum(dim=(0, 2, 3))
var = X.var(unbiased=True, dim=(0, 2, 3))
N = X.numel() / X.size(1)
sqrt_var = torch.sqrt(var)
ctx.eps = eps
ctx.sum = sum
ctx.N = N
ctx.sqrt_var = sqrt_var
mean = sum / N
denom = sqrt_var + eps
out = X - unsqueeze_all(mean)
out /= unsqueeze_all(denom)
return out
@staticmethod
def backward(ctx, grad_out):
X, conv_weight = ctx.saved_tensors
X_conv_out = F.conv2d(X, conv_weight)
grad_out = batch_norm_backward(grad_out, X_conv_out, ctx.sum, ctx.
sqrt_var, ctx.N, ctx.eps)
grad_X, grad_input = convolution_backward(grad_out, X, conv_weight)
return grad_X, grad_input, None, None, None, None, None
class FusedConvBN(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size,
exp_avg_factor=0.1, eps=0.001, device=None, dtype=None):
super(FusedConvBN, self).__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
weight_shape = out_channels, in_channels, kernel_size, kernel_size
self.conv_weight = nn.Parameter(torch.empty(*weight_shape, **
factory_kwargs))
num_features = out_channels
self.num_features = num_features
self.eps = eps
self.reset_parameters()
def forward(self, X):
return FusedConvBN2DFunction.apply(X, self.conv_weight, self.eps)
def reset_parameters(self) ->None:
nn.init.kaiming_uniform_(self.conv_weight, a=math.sqrt(5))
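# The custom backward can be sanity-checked against numeric gradients
# (a sketch, assuming double precision to satisfy gradcheck's tolerances):
#   x = torch.rand(4, 4, 4, 4, dtype=torch.double, requires_grad=True)
#   w = torch.rand(4, 4, 4, 4, dtype=torch.double, requires_grad=True)
#   torch.autograd.gradcheck(FusedConvBN2DFunction.apply, (x, w))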
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 0.25
tmp9 = tmp7 * tmp8
tmp10 = tmp0 - tmp9
tmp11 = 4.0
tmp12 = tmp7 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp2 - tmp12
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp4 - tmp12
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp6 - tmp12
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 0.001
tmp28 = tmp26 + tmp27
tmp29 = tmp10 / tmp28
tl.store(out_ptr0 + x2, tmp29, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
del primals_1
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sub_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf1, buf0, reinterpret_tensor(primals_2, (4, 4, 4, 4), (16, 64,
4, 1), 0)
def unsqueeze_all(t):
return t[None, :, None, None]
def batch_norm_backward(grad_out, X, sum, sqrt_var, N, eps):
tmp = ((X - unsqueeze_all(sum) / N) * grad_out).sum(dim=(0, 2, 3))
tmp *= -1
d_denom = tmp / (sqrt_var + eps) ** 2
d_var = d_denom / (2 * sqrt_var)
d_mean_dx = grad_out / unsqueeze_all(sqrt_var + eps)
d_mean_dx = unsqueeze_all(-d_mean_dx.sum(dim=(0, 2, 3)) / N)
grad_input = X * unsqueeze_all(d_var * N)
grad_input += unsqueeze_all(-d_var * sum)
grad_input *= 2 / ((N - 1) * N)
grad_input += d_mean_dx
grad_input *= unsqueeze_all(sqrt_var + eps)
grad_input += grad_out
grad_input /= unsqueeze_all(sqrt_var + eps)
return grad_input
def convolution_backward(grad_out, X, weight):
grad_input = F.conv2d(X.transpose(0, 1), grad_out.transpose(0, 1)
).transpose(0, 1)
grad_X = F.conv_transpose2d(grad_out, weight)
return grad_X, grad_input
class FusedConvBN2DFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, X, conv_weight, eps=0.001):
assert X.ndim == 4
ctx.save_for_backward(X, conv_weight)
X = F.conv2d(X, conv_weight)
sum = X.sum(dim=(0, 2, 3))
var = X.var(unbiased=True, dim=(0, 2, 3))
N = X.numel() / X.size(1)
sqrt_var = torch.sqrt(var)
ctx.eps = eps
ctx.sum = sum
ctx.N = N
ctx.sqrt_var = sqrt_var
mean = sum / N
denom = sqrt_var + eps
out = X - unsqueeze_all(mean)
out /= unsqueeze_all(denom)
return out
@staticmethod
def backward(ctx, grad_out):
X, conv_weight = ctx.saved_tensors
X_conv_out = F.conv2d(X, conv_weight)
grad_out = batch_norm_backward(grad_out, X_conv_out, ctx.sum, ctx.
sqrt_var, ctx.N, ctx.eps)
grad_X, grad_input = convolution_backward(grad_out, X, conv_weight)
return grad_X, grad_input, None, None, None, None, None
class FusedConvBNNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size,
exp_avg_factor=0.1, eps=0.001, device=None, dtype=None):
super(FusedConvBNNew, self).__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
weight_shape = out_channels, in_channels, kernel_size, kernel_size
self.conv_weight = nn.Parameter(torch.empty(*weight_shape, **
factory_kwargs))
num_features = out_channels
self.num_features = num_features
self.eps = eps
self.reset_parameters()
def reset_parameters(self) ->None:
nn.init.kaiming_uniform_(self.conv_weight, a=math.sqrt(5))
def forward(self, input_0):
primals_1 = self.conv_weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| youkaichao/tutorials | FusedConvBN | false | 13,202 | [
"BSD-3-Clause"
]
| 0 | af34b10b70d99659eb016a2a1d5c31b9ae8ba3da | https://github.com/youkaichao/tutorials/tree/af34b10b70d99659eb016a2a1d5c31b9ae8ba3da |
BeitPooler | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/5b/c5baki3tbgh3axx6a5rabawt3rjefz7oij5ndn6ywpiudiv4b32l.py
# Topologically Sorted Source Nodes: [mean, pooled_output], Original ATen: [aten.mean, aten.native_layer_norm]
# Source node to ATen node mapping:
# mean => mean
# pooled_output => var_mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%slice_2, [1]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%mean, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_mean_native_layer_norm_0 = async_compile.triton('triton_poi_fused_mean_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (32 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (48 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (17 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (33 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (49 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (18 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (34 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (50 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (19 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (35 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (51 + (4*x0) + (64*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 3.0
tmp6 = tmp4 / tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp11 / tmp5
tmp13 = tmp6 + tmp12
tmp16 = tmp14 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp18 / tmp5
tmp20 = tmp13 + tmp19
tmp23 = tmp21 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp25 / tmp5
tmp27 = tmp20 + tmp26
tmp28 = 4.0
tmp29 = tmp27 / tmp28
tmp30 = tmp6 - tmp29
tmp31 = tmp30 * tmp30
tmp32 = tmp12 - tmp29
tmp33 = tmp32 * tmp32
tmp34 = tmp31 + tmp33
tmp35 = tmp19 - tmp29
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp38 = tmp26 - tmp29
tmp39 = tmp38 * tmp38
tmp40 = tmp37 + tmp39
tmp41 = tmp40 / tmp28
tl.store(out_ptr0 + (x2), tmp29, xmask)
tl.store(out_ptr1 + (x2), tmp41, xmask)
''', device_str='cuda')
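# Kernel 0 fuses the patch-token mean with the layer-norm statistics: the
# three loads per element average tokens 1..3 (the `[:, 1:, :]` slice of a
# length-4 sequence, hence the division by 3.0), then tmp29/tmp41 are the
# layer-norm mean and biased variance over the size-4 hidden dim.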
# kernel path: runs/run_shard_9/inductor_cache/ma/cma2gztg2evdi5ypp34u5rjy75dzaegxc25fgxih4zrokn4a2btm.py
# Topologically Sorted Source Nodes: [mean, pooled_output], Original ATen: [aten.mean, aten.native_layer_norm]
# Source node to ATen node mapping:
# mean => mean
# pooled_output => add, add_1, mul, mul_1, rsqrt, sub
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%slice_2, [1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %getitem_1), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_mean_native_layer_norm_1 = async_compile.triton('triton_poi_fused_mean_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x4 = xindex % 16
x3 = (xindex // 4)
x5 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (16 + x4 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x4 + (64*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (48 + x4 + (64*x2)), xmask)
tmp7 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 3.0
tmp6 = tmp4 / tmp5
tmp8 = tmp6 - tmp7
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp8 * tmp12
tmp15 = tmp13 * tmp14
tmp17 = tmp15 + tmp16
tl.store(out_ptr0 + (x5), tmp13, xmask)
tl.store(out_ptr1 + (x5), tmp17, xmask)
''', device_str='cuda')
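# Added note: this second kernel completes the fusion by re-deriving the token mean and
# applying the affine layer norm, y = (mean(x) - mu) * rsqrt(var + eps) * weight + bias,
# with eps = 1.0 folded in as a constant (layer_norm_eps=1 in the mock config below).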
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [mean, pooled_output], Original ATen: [aten.mean, aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_native_layer_norm_0.run(primals_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, pooled_output], Original ATen: [aten.mean, aten.native_layer_norm]
triton_poi_fused_mean_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, buf3, 64, grid=grid(64), stream=stream0)
del buf0
del buf1
del primals_1
del primals_2
del primals_3
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.utils.checkpoint
class BeitPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps) if config.use_mean_pooling else None
def forward(self, hidden_states):
if self.layernorm is not None:
patch_tokens = hidden_states[:, 1:, :]
pooled_output = self.layernorm(patch_tokens.mean(1))
else:
pooled_output = hidden_states[:, 0]
return pooled_output
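def _example_beit_pooler():
    # Hedged usage sketch (added for illustration, not part of the original repo):
    # with use_mean_pooling truthy, the pooler mean-pools every token after the CLS
    # token and layer-normalizes the result.
    config = _mock_config(use_mean_pooling=True, hidden_size=4, layer_norm_eps=1e-12)
    pooler = BeitPooler(config)
    return pooler(torch.rand(2, 5, 4))  # -> shape (2, 4)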
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(use_mean_pooling=4, hidden_size=4,
layer_norm_eps=1)}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (32 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (48 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (17 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (33 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (49 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (18 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (34 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (50 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (19 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (35 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (51 + 4 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 3.0
tmp6 = tmp4 / tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp11 / tmp5
tmp13 = tmp6 + tmp12
tmp16 = tmp14 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp18 / tmp5
tmp20 = tmp13 + tmp19
tmp23 = tmp21 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp25 / tmp5
tmp27 = tmp20 + tmp26
tmp28 = 4.0
tmp29 = tmp27 / tmp28
tmp30 = tmp6 - tmp29
tmp31 = tmp30 * tmp30
tmp32 = tmp12 - tmp29
tmp33 = tmp32 * tmp32
tmp34 = tmp31 + tmp33
tmp35 = tmp19 - tmp29
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp38 = tmp26 - tmp29
tmp39 = tmp38 * tmp38
tmp40 = tmp37 + tmp39
tmp41 = tmp40 / tmp28
tl.store(out_ptr0 + x2, tmp29, xmask)
tl.store(out_ptr1 + x2, tmp41, xmask)
@triton.jit
def triton_poi_fused_mean_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x4 = xindex % 16
x3 = xindex // 4
x5 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (16 + x4 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x4 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (48 + x4 + 64 * x2), xmask)
tmp7 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 3.0
tmp6 = tmp4 / tmp5
tmp8 = tmp6 - tmp7
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp8 * tmp12
tmp15 = tmp13 * tmp14
tmp17 = tmp15 + tmp16
tl.store(out_ptr0 + x5, tmp13, xmask)
tl.store(out_ptr1 + x5, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_native_layer_norm_0[grid(16)](primals_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mean_native_layer_norm_1[grid(64)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
del primals_3
return buf3, buf2
class BeitPoolerNew(nn.Module):
def __init__(self, config):
super().__init__()
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps) if config.use_mean_pooling else None
def forward(self, input_0):
primals_2 = self.layernorm.weight
primals_3 = self.layernorm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Clemens123/transformers | BeitPooler | false | 13,203 | [
"Apache-2.0"
]
| 0 | 22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 | https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 |
AttDec | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/wn/cwnu6ncturya7giec3q2cikfd3y3bkgzl4ibxp3ub3fpg5fssvsj.py
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# leaky_relu => gt, mul, where
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
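# Added note: this kernel fuses the fc1 bias add with LeakyReLU(0.2) in place:
# y = x + b, then y if y > 0 else 0.2 * y.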
# kernel path: runs/run_shard_9/inductor_cache/ro/croeu53p6tbxtp6xjj4kgyxrozjl2ozleivfmextpoqt2lvk7ggz.py
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# h_1 => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%addmm_1, %expand), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.div]
triton_poi_fused_div_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
return (buf3, buf1, primals_1, buf1, buf2, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
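# Added note: this is the DCGAN-style initialization convention -- Linear weights drawn
# from N(0, 0.02) with zero bias, BatchNorm weights from N(1, 0.02) with zero bias.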
class AttDec(nn.Module):
def __init__(self, opt, attSize):
super(AttDec, self).__init__()
self.embedSz = 0
self.fc1 = nn.Linear(opt.resSize + self.embedSz, opt.ngh)
self.fc3 = nn.Linear(opt.ngh, attSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.hidden = None
self.sigmoid = None
self.apply(weights_init)
def forward(self, feat, att=None):
h = feat
if self.embedSz > 0:
assert att is not None, 'Conditional Decoder requires attribute input'
h = torch.cat((feat, att), 1)
self.hidden = self.lrelu(self.fc1(h))
h = self.fc3(self.hidden)
if self.sigmoid is not None:
h = self.sigmoid(h)
else:
h = h / h.pow(2).sum(1).sqrt().unsqueeze(1).expand(h.size(0), h
.size(1))
self.out = h
return h
def getLayersOutDet(self):
return self.hidden.detach()
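def _normalize_rows_reference(h):
    # Hedged equivalence sketch (added, not from the source): the manual expression in
    # forward() divides each row by its Euclidean norm, which matches
    # torch.nn.functional.normalize with p=2 along dim 1 (up to its eps clamp).
    return torch.nn.functional.normalize(h, p=2, dim=1)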
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(resSize=4, ngh=4), 'attSize': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
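# Added note: for each of the 4 rows this kernel loads all 4 elements, accumulates the
# sum of squares, and divides every element by its square root -- the row-wise L2
# normalization from AttDec.forward fused into a single pass.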
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf3, buf1, primals_1, buf1, buf2, primals_4
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class AttDecNew(nn.Module):
def __init__(self, opt, attSize):
super(AttDecNew, self).__init__()
self.embedSz = 0
self.fc1 = nn.Linear(opt.resSize + self.embedSz, opt.ngh)
self.fc3 = nn.Linear(opt.ngh, attSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.hidden = None
self.sigmoid = None
self.apply(weights_init)
def getLayersOutDet(self):
return self.hidden.detach()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_3 = self.fc1.bias
primals_2 = self.fc3.weight
primals_5 = self.fc3.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| IacoSimoncini/tfvaegan | AttDec | false | 13,204 | [
"MIT"
]
| 0 | 157b526d65d0b0d5412f4be6fed02fc7d6325827 | https://github.com/IacoSimoncini/tfvaegan/tree/157b526d65d0b0d5412f4be6fed02fc7d6325827 |
DeiTAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
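# Added note: numerically stable softmax, step 1 -- subtract the row max before
# exponentiating (exp(x - max(x))) so large logits cannot overflow; the division by
# the row sum happens in the next kernel.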
# kernel path: runs/run_shard_9/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
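# Added note: softmax step 2 -- divide by the row sum, guarding fully masked rows: if
# every logit in a row is -inf, the row is zeroed out instead of producing NaNs.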
# kernel path: runs/run_shard_9/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_9
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
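def _example_find_heads():
    # Hedged usage sketch (added for illustration): with 4 heads of size 2 and head 0
    # already pruned, requesting heads [1, 3] shifts their mask positions down by one
    # and keeps the flat weight rows of the surviving heads.
    heads, index = find_pruneable_heads_and_indices([1, 3], n_heads=4, head_size=2,
        already_pruned_heads={0})
    return heads, index  # heads == {1, 3}; index == tensor([2, 3, 6, 7])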
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
    index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
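def _example_prune_linear():
    # Hedged usage sketch (added for illustration): keep 6 of the 8 input features of a
    # Linear layer; dim=1 selects weight columns and leaves the bias untouched.
    layer = nn.Linear(8, 4)
    kept = torch.arange(6)
    smaller = prune_linear_layer(layer, kept, dim=1)
    return smaller.weight.shape  # torch.Size([4, 6])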
class DeiTSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
                f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
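# Added note: forward() above is plain multi-head scaled dot-product attention,
# softmax(Q @ K^T / sqrt(d_head)) @ V, with Q/K/V obtained by per-head reshaping of
# three Linear projections. The fused Triton kernels earlier in this entry implement
# the same pipeline for the mock config (4 heads of size 1, so the 1/sqrt(d_head)
# scale folds to the literal 1.0 in triton_poi_fused_0).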
class DeiTSelfOutput(nn.Module):
"""
The residual connection is defined in DeiTLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class DeiTAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = DeiTSelfAttention(config)
self.output = DeiTSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.
attention.num_attention_heads, self.attention.
attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = (self.attention.
num_attention_heads - len(heads))
self.attention.all_head_size = (self.attention.attention_head_size *
self.attention.num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, head_mask=None, output_attentions=False):
self_outputs = self.attention(hidden_states, head_mask,
output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
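def _example_prune_heads():
    # Hedged usage sketch (added for illustration): dropping one of the four attention
    # heads shrinks the query/key/value projections and the output projection together.
    attn = DeiTAttention(_mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5))
    attn.prune_heads([0])
    return attn.attention.all_head_size  # 3 after pruning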
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_9
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_8
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
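def _demo_find_pruneable_heads_and_indices():
    # Hedged usage sketch (illustrative helper, not part of the generated
    # module): with 4 heads of size 2, pruning head 1 keeps the flat feature
    # indices belonging to heads 0, 2 and 3.
    heads, index = find_pruneable_heads_and_indices([1], n_heads=4,
        head_size=2, already_pruned_heads=set())
    assert heads == {1}
    assert index.tolist() == [0, 1, 4, 5, 6, 7]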
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
    index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
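def _demo_prune_linear_layer():
    # Hedged usage sketch (illustrative helper, not part of the generated
    # module): keeping rows [0, 2] of an out_features=8 layer along dim=0
    # yields a 4 -> 2 layer whose weights equal the kept rows.
    layer = nn.Linear(4, 8)
    index = torch.tensor([0, 2], dtype=torch.long)
    pruned = prune_linear_layer(layer, index, dim=0)
    assert pruned.weight.shape == (2, 4)
    assert torch.equal(pruned.weight.data, layer.weight.data[index])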
class DeiTSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
                f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
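def _demo_deit_self_attention_shapes():
    # Hedged shape sketch (uses a minimal config stand-in, not the real DeiT
    # config class): transpose_for_scores maps (batch, seq, hidden) to
    # (batch, heads, seq, head_size); the context layer folds it back.
    class _Cfg:
        hidden_size = 4
        num_attention_heads = 2
        attention_probs_dropout_prob = 0.0
    attn = DeiTSelfAttention(_Cfg())
    x = torch.rand(3, 5, 4)
    assert attn.transpose_for_scores(attn.query(x)).shape == (3, 2, 5, 2)
    context, = attn(x)
    assert context.shape == (3, 5, 4)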
class DeiTSelfOutput(nn.Module):
"""
The residual connection is defined in DeiTLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class DeiTAttentionNew(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = DeiTSelfAttention(config)
self.output = DeiTSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.
attention.num_attention_heads, self.attention.
attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = (self.attention.
num_attention_heads - len(heads))
self.attention.all_head_size = (self.attention.attention_head_size *
self.attention.num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_0):
primals_1 = self.attention.query.weight
primals_2 = self.attention.query.bias
primals_4 = self.attention.key.weight
primals_5 = self.attention.key.bias
primals_6 = self.attention.value.weight
primals_7 = self.attention.value.bias
primals_8 = self.output.dense.weight
primals_9 = self.output.dense.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
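def _demo_fused_attention_equivalence():
    # Hedged equivalence sketch (requires CUDA; minimal config stand-in): for
    # the fixed (4, 4, 4) input the kernels were specialized for, the fused
    # call() path should match the eager attention + output projection with
    # dropout disabled.
    class _Cfg:
        hidden_size = 4
        num_attention_heads = 4
        attention_probs_dropout_prob = 0.0
        hidden_dropout_prob = 0.0
    m = DeiTAttentionNew(_Cfg()).cuda().eval()
    x = torch.rand(4, 4, 4, device='cuda')
    eager = m.output(m.attention(x)[0], x)
    torch.testing.assert_close(m(x), eager, rtol=1e-4, atol=1e-4)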
| Clemens123/transformers | DeiTAttention | false | 13,205 | ["Apache-2.0"] | 0 | 22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 | https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 |
Discriminator_D1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# h => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
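# Hedged eager reference (editorial, not generated) for triton_poi_fused_cat_0:
# for two (4, 4) inputs the kernel writes torch.cat((x, att), 1) into a (4, 8)
# buffer, selecting from in_ptr0 when the column index is < 4 and from in_ptr1
# otherwise.
def _ref_cat(x, att):
    return torch.cat((x, att), 1)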
# kernel path: runs/run_shard_9/inductor_cache/5n/c5nsofekij6a7ap52g7x25mzkhruvzcsi6l2zpm7nkpnzba6k7fc.py
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# leaky_relu => gt, mul, where
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_4), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor, %mul), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
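# Hedged eager reference (editorial, not generated) for
# triton_poi_fused_leaky_relu_1: the kernel adds the fc1 bias (broadcast over
# rows) to the matmul result, then applies tl.where(v > 0, v, 0.2 * v), i.e.
# LeakyReLU with negative slope 0.2.
def _ref_leaky_relu_bias(mm_out, bias):
    return torch.nn.functional.leaky_relu(mm_out + bias, negative_slope=0.2)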
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_6
return (buf4, buf2, buf0, buf2, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class Discriminator_D1(nn.Module):
def __init__(self, opt):
super(Discriminator_D1, self).__init__()
self.fc1 = nn.Linear(opt.resSize + opt.attSize, opt.ndh)
self.fc2 = nn.Linear(opt.ndh, 1)
self.lrelu = nn.LeakyReLU(0.2, True)
self.apply(weights_init)
def forward(self, x, att):
h = torch.cat((x, att), 1)
self.hidden = self.lrelu(self.fc1(h))
h = self.fc2(self.hidden)
return h
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(resSize=4, attSize=4, ndh=4)}]
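def _demo_discriminator():
    # Hedged usage sketch (illustrative helper) reusing the _mock_config
    # options from get_init_inputs: the discriminator maps a (batch, resSize)
    # feature and a (batch, attSize) attribute vector to one logit per row.
    d = Discriminator_D1(_mock_config(resSize=4, attSize=4, ndh=4))
    out = d(torch.rand(4, 4), torch.rand(4, 4))
    assert out.shape == (4, 1)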
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_leaky_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_6
return buf4, buf2, buf0, buf2, primals_5
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class Discriminator_D1New(nn.Module):
def __init__(self, opt):
super(Discriminator_D1New, self).__init__()
self.fc1 = nn.Linear(opt.resSize + opt.attSize, opt.ndh)
self.fc2 = nn.Linear(opt.ndh, 1)
self.lrelu = nn.LeakyReLU(0.2, True)
self.apply(weights_init)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
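def _demo_fused_discriminator_equivalence():
    # Hedged equivalence sketch (requires CUDA; a plain namespace stands in
    # for the training options object): the fused forward should equal
    # fc2(leaky_relu(fc1(cat((x, att), 1)))) computed eagerly.
    from types import SimpleNamespace
    d = Discriminator_D1New(SimpleNamespace(resSize=4, attSize=4, ndh=4)).cuda()
    x = torch.rand(4, 4, device='cuda')
    att = torch.rand(4, 4, device='cuda')
    eager = d.fc2(d.lrelu(d.fc1(torch.cat((x, att), 1))))
    torch.testing.assert_close(d(x, att), eager)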
| IacoSimoncini/tfvaegan | Discriminator_D1 | false | 13,206 | ["MIT"] | 0 | 157b526d65d0b0d5412f4be6fed02fc7d6325827 | https://github.com/IacoSimoncini/tfvaegan/tree/157b526d65d0b0d5412f4be6fed02fc7d6325827 |
SPPblock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/hs/chsgbajkvlzt23dbj5auzazquzfdbhbhjrpqoczeg3opck4yocad.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => getitem
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
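# Hedged eager reference (editorial, not generated) for
# triton_poi_fused_max_pool2d_with_indices_0: a 2x2, stride-2 max pool taking
# the (N, C, 64, 64) input to (N, C, 32, 32).
def _ref_pool_2x2(x):
    return torch.nn.functional.max_pool2d(x, kernel_size=2, stride=2)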
# kernel path: runs/run_shard_9/inductor_cache/dj/cdjpvf45m2gmwdpxqghwy3n7o5canbnu4ks6bxkuaf6ogy4u6mcz.py
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# upsample => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_1 = async_compile.triton('triton_poi_fused__to_copy_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
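# Hedged note on the index math above: with align_corners=False bilinear
# upsampling from 32 to 64, the source coordinate of output pixel i is
# (i + 0.5) * 0.5 - 0.5, clamped at 0; the kernel stores its floor as the low
# integer neighbor, and the companion add/clamp kernel derives the high
# neighbor as min(low + 1, 31).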
# kernel path: runs/run_shard_9/inductor_cache/ek/cektoo3xtedaewlh5uggdyf55krfjuty35h3vjq6vtyduxqrlkz4.py
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# upsample => add_1, clamp_max
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 31), kwargs = {})
triton_poi_fused_add_clamp_2 = async_compile.triton('triton_poi_fused_add_clamp_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/a3/ca3np32wv5647cru4u4cskmo7z65jffrdabbplzceq4wcduwuwh7.py
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# upsample => add, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
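# Hedged note: tmp13 above is the fractional part of the clamped source
# coordinate, limited to [0, 1]; it is the linear interpolation weight applied
# between the low and high neighbors along each axis.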
# kernel path: runs/run_shard_9/inductor_cache/3i/c3i4svp5bjn25m4h4mozovf2gf77ztkp3ps4iaw6wj2bfxlz77ne.py
# Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d_1 => getitem_2
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 7056
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 21
x1 = (xindex // 21) % 21
x4 = (xindex // 441)
x3 = (xindex // 1764)
x5 = xindex % 1764
tmp0 = tl.load(in_ptr0 + ((3*x0) + (192*x1) + (4096*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (3*x0) + (192*x1) + (4096*x4)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (3*x0) + (192*x1) + (4096*x4)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (64 + (3*x0) + (192*x1) + (4096*x4)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (65 + (3*x0) + (192*x1) + (4096*x4)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (66 + (3*x0) + (192*x1) + (4096*x4)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (128 + (3*x0) + (192*x1) + (4096*x4)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (129 + (3*x0) + (192*x1) + (4096*x4)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (130 + (3*x0) + (192*x1) + (4096*x4)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + (x5 + (1792*x3)), tmp16, xmask)
''', device_str='cuda')
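# Hedged eager reference (editorial, not generated) for
# triton_poi_fused_max_pool2d_with_indices_4: a 3x3, stride-3 max pool taking
# (N, C, 64, 64) to (N, C, 21, 21); the output is written with a padded
# per-image stride of 1792 elements (4 * 441 = 1764, rounded up for alignment).
def _ref_pool_3x3(x):
    return torch.nn.functional.max_pool2d(x, kernel_size=3, stride=3)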
# kernel path: runs/run_shard_9/inductor_cache/f4/cf4oomz2jxs2jmynidcxgsi4hc5a5g5w6e6mfoejtiygvx2ktoxm.py
# Topologically Sorted Source Nodes: [upsample_1], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# upsample_1 => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_5 = async_compile.triton('triton_poi_fused__to_copy_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_5(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.328125
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/kb/ckbhvrchwnfddqo5mj7oyddllrqzc7dajgqmaztjfb4t45pz54ma.py
# Topologically Sorted Source Nodes: [upsample_1], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# upsample_1 => add_8, clamp_max_4
# Graph fragment:
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_8, 20), kwargs = {})
triton_poi_fused_add_clamp_6 = async_compile.triton('triton_poi_fused_add_clamp_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_6(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.328125
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 20, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/3b/c3b5xlygn2w35ktaemwgswb2qexnj6ytxz2jxvf3c4hb3qpx6hv4.py
# Topologically Sorted Source Nodes: [upsample, upsample_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# upsample => add, convert_element_type, iota
# upsample_1 => clamp_max_6, clamp_min_4, clamp_min_6, mul_5, sub_7, sub_9
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.328125), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, 0.5), kwargs = {})
# %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_7, 0.0), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.328125
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/pf/cpfwuo7tucoqpsuoxs3ocdrmbokrprhchayywaz5gswuopkfmgsd.py
# Topologically Sorted Source Nodes: [max_pool2d_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d_2 => getitem_4
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 25, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12) % 12
x2 = (xindex // 144)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (64 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (65 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (66 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (67 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (68 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (128 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (129 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (130 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (131 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (132 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (192 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (193 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (194 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (195 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (196 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (256 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (257 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (258 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (259 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (260 + (5*x0) + (320*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp32 = triton_helpers.maximum(tmp31, tmp30)
tmp34 = triton_helpers.maximum(tmp33, tmp32)
tmp36 = triton_helpers.maximum(tmp35, tmp34)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp40 = triton_helpers.maximum(tmp39, tmp38)
tmp42 = triton_helpers.maximum(tmp41, tmp40)
tmp44 = triton_helpers.maximum(tmp43, tmp42)
tmp46 = triton_helpers.maximum(tmp45, tmp44)
tmp48 = triton_helpers.maximum(tmp47, tmp46)
tl.store(out_ptr0 + (x3), tmp48, xmask)
''', device_str='cuda')
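# Hedged eager reference (editorial, not generated) for
# triton_poi_fused_max_pool2d_with_indices_8: a 5x5, stride-5 max pool taking
# (N, C, 64, 64) to (N, C, 12, 12).
def _ref_pool_5x5(x):
    return torch.nn.functional.max_pool2d(x, kernel_size=5, stride=5)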
# kernel path: runs/run_shard_9/inductor_cache/l7/cl7p22pvafpcrmefx45kyqbanh4ld76op7eq5grjd2zzx2zlpwi3.py
# Topologically Sorted Source Nodes: [upsample_2], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# upsample_2 => convert_element_type_9
# Graph fragment:
# %convert_element_type_9 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
triton_poi_fused__to_copy_9 = async_compile.triton('triton_poi_fused__to_copy_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_9(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.1875
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/qe/cqewu77x72ovzvlhbycbd53cqjkbyy7zdjjvtqgely7c6xo647u2.py
# Topologically Sorted Source Nodes: [upsample_2], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# upsample_2 => add_15, clamp_max_8
# Graph fragment:
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_9, 1), kwargs = {})
# %clamp_max_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_15, 11), kwargs = {})
triton_poi_fused_add_clamp_10 = async_compile.triton('triton_poi_fused_add_clamp_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_10(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.1875
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 11, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/eh/ceh56lg3zkvoclsk7od77ns5p3v4jnvm5zcvn2233nis5q7wkit7.py
# Topologically Sorted Source Nodes: [upsample, upsample_2], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# upsample => add, convert_element_type, iota
# upsample_2 => clamp_max_10, clamp_min_10, clamp_min_8, mul_10, sub_14, sub_16
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.1875), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_10, 0.5), kwargs = {})
# %clamp_min_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_14, 0.0), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_8, %convert_element_type_11), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_16, 0.0), kwargs = {})
# %clamp_max_10 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.1875
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
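# Sketch of the lerp weight computed above (illustrative only): the
# fractional part of the source coordinate, clamped to [0, 1]. The
# wrapper reuses this same kernel for both the horizontal (64,) and the
# vertical (64, 1) weight vectors.
def _lerp_weight_sketch(out_size=64, in_size=12):
    import torch
    x = torch.arange(out_size, dtype=torch.float32)
    src = ((x + 0.5) * (in_size / out_size) - 0.5).clamp(min=0.0)
    return (src - src.floor()).clamp(0.0, 1.0)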
# kernel path: runs/run_shard_9/inductor_cache/jt/cjtnt6hjamm3vgjkpwzorbvktkzw6jrtwkljjwmzihzeqhu6sgk7.py
# Topologically Sorted Source Nodes: [upsample_3], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# upsample_3 => convert_element_type_13
# Graph fragment:
# %convert_element_type_13 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_6, torch.int64), kwargs = {})
triton_poi_fused__to_copy_12 = async_compile.triton('triton_poi_fused__to_copy_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.15625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ww/cwwsqvnexaz5z4zaqrm4l3223xywmpmnz3nd4sw3jgy7pqet5ewn.py
# Topologically Sorted Source Nodes: [upsample_3], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# upsample_3 => add_22, clamp_max_12
# Graph fragment:
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_13, 1), kwargs = {})
# %clamp_max_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_22, 9), kwargs = {})
triton_poi_fused_add_clamp_13 = async_compile.triton('triton_poi_fused_add_clamp_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.15625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 9, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4d/c4dnqp23qes54gwuldfae6pd5dtfswfwyytxtquobu74catwihxm.py
# Topologically Sorted Source Nodes: [upsample, upsample_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# upsample => add, convert_element_type, iota
# upsample_3 => clamp_max_14, clamp_min_12, clamp_min_14, mul_15, sub_21, sub_23
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.15625), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_15, 0.5), kwargs = {})
# %clamp_min_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_21, 0.0), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_12, %convert_element_type_15), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_23, 0.0), kwargs = {})
# %clamp_max_14 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_14, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.15625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
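# Kernels 12-14 repeat the same index/weight pattern for the 10x10 pool4
# branch: the scale 0.15625 is 10/64, and the high neighbour is clamped
# to 9.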
# kernel path: runs/run_shard_9/inductor_cache/c4/cc4gbby2j4xsnyg53hb2ubfdlif6prlt7fohlcbiudyuu2bhws6j.py
# Topologically Sorted Source Nodes: [conv2d, upsample, conv2d_1, upsample_1, conv2d_2, upsample_2, conv2d_3, upsample_3], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d => convolution
# conv2d_1 => convolution_1
# conv2d_2 => convolution_2
# conv2d_3 => convolution_3
# upsample => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_4, add_5, add_6, mul_2, mul_3, mul_4, sub_3, sub_4, sub_6
# upsample_1 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_11, add_12, add_13, mul_7, mul_8, mul_9, sub_10, sub_11, sub_13
# upsample_2 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_18, add_19, add_20, mul_12, mul_13, mul_14, sub_17, sub_18, sub_20
# upsample_3 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_25, add_26, add_27, mul_17, mul_18, mul_19, sub_24, sub_25, sub_27
# Graph fragment:
# %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
# %convolution_1 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_1, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_1, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_1, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_1, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_6), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_7), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_6), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_8), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_12, %add_11), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %clamp_max_7), kwargs = {})
# %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mul_9), kwargs = {})
# %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_2, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_2, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {})
# %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_2, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_2, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_10), kwargs = {})
# %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_12), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %clamp_max_10), kwargs = {})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_13), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_19, %add_18), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %clamp_max_11), kwargs = {})
# %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_18, %mul_14), kwargs = {})
# %convolution_3 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_3, [None, None, %convert_element_type_13, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_3, [None, None, %convert_element_type_13, %clamp_max_13]), kwargs = {})
# %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_3, [None, None, %clamp_max_12, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_3, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %clamp_max_14), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_17), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %clamp_max_14), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_18), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_26, %add_25), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %clamp_max_15), kwargs = {})
# %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %mul_19), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_mul_sub_15 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*i64', 6: '*fp32', 7: '*fp32', 8: '*i64', 9: '*fp32', 10: '*i64', 11: '*fp32', 12: '*i64', 13: '*i64', 14: '*fp32', 15: '*i64', 16: '*fp32', 17: '*i64', 18: '*fp32', 19: '*i64', 20: '*i64', 21: '*fp32', 22: '*i64', 23: '*fp32', 24: '*i64', 25: '*fp32', 26: '*i64', 27: '*i64', 28: '*fp32', 29: '*i64', 30: '*fp32', 31: '*i64', 32: '*fp32', 33: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_15', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 25, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_15(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x2 = (xindex // 4096)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (0))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr9 + (x0), None, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr11 + (x0), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr12 + (x0), None, eviction_policy='evict_last')
tmp59 = tl.load(in_ptr13 + (x1), None, eviction_policy='evict_last')
tmp71 = tl.load(in_ptr14 + (x1), None, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr15 + (x1), None, eviction_policy='evict_last')
tmp79 = tl.load(in_ptr16 + (x0), None, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr18 + (x0), None, eviction_policy='evict_last')
tmp92 = tl.load(in_ptr19 + (x0), None, eviction_policy='evict_last')
tmp95 = tl.load(in_ptr20 + (x1), None, eviction_policy='evict_last')
tmp107 = tl.load(in_ptr21 + (x1), None, eviction_policy='evict_last')
tmp110 = tl.load(in_ptr22 + (x1), None, eviction_policy='evict_last')
tmp115 = tl.load(in_ptr23 + (x0), None, eviction_policy='evict_last')
tmp121 = tl.load(in_ptr25 + (x0), None, eviction_policy='evict_last')
tmp128 = tl.load(in_ptr26 + (x0), None, eviction_policy='evict_last')
tmp131 = tl.load(in_ptr27 + (x1), None, eviction_policy='evict_last')
tmp143 = tl.load(in_ptr28 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (32*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp12 = tmp9 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = tmp13 < 0
tmp16 = tl.where(tmp15, tmp14, tmp13)
tmp17 = tl.load(in_ptr2 + (tmp16 + (32*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tmp18 = tmp17 + tmp11
tmp19 = tmp18 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp12 + tmp21
tmp24 = tmp23 + tmp1
tmp25 = tmp23 < 0
tmp26 = tl.where(tmp25, tmp24, tmp23)
tmp27 = tl.load(in_ptr2 + (tmp8 + (32*tmp26) + (1024*x2)), None, eviction_policy='evict_last')
tmp28 = tmp27 + tmp11
tmp29 = tl.load(in_ptr2 + (tmp16 + (32*tmp26) + (1024*x2)), None, eviction_policy='evict_last')
tmp30 = tmp29 + tmp11
tmp31 = tmp30 - tmp28
tmp32 = tmp31 * tmp20
tmp33 = tmp28 + tmp32
tmp34 = tmp33 - tmp22
tmp36 = tmp34 * tmp35
tmp37 = tmp22 + tmp36
tmp39 = tl.full([XBLOCK], 21, tl.int32)
tmp40 = tmp38 + tmp39
tmp41 = tmp38 < 0
tmp42 = tl.where(tmp41, tmp40, tmp38)
tmp44 = tmp43 + tmp39
tmp45 = tmp43 < 0
tmp46 = tl.where(tmp45, tmp44, tmp43)
tmp47 = tl.load(in_ptr10 + (tmp46 + (21*tmp42) + (441*x2)), None, eviction_policy='evict_last')
tmp48 = tmp47 + tmp11
tmp50 = tmp49 + tmp39
tmp51 = tmp49 < 0
tmp52 = tl.where(tmp51, tmp50, tmp49)
tmp53 = tl.load(in_ptr10 + (tmp52 + (21*tmp42) + (441*x2)), None, eviction_policy='evict_last')
tmp54 = tmp53 + tmp11
tmp55 = tmp54 - tmp48
tmp57 = tmp55 * tmp56
tmp58 = tmp48 + tmp57
tmp60 = tmp59 + tmp39
tmp61 = tmp59 < 0
tmp62 = tl.where(tmp61, tmp60, tmp59)
tmp63 = tl.load(in_ptr10 + (tmp46 + (21*tmp62) + (441*x2)), None, eviction_policy='evict_last')
tmp64 = tmp63 + tmp11
tmp65 = tl.load(in_ptr10 + (tmp52 + (21*tmp62) + (441*x2)), None, eviction_policy='evict_last')
tmp66 = tmp65 + tmp11
tmp67 = tmp66 - tmp64
tmp68 = tmp67 * tmp56
tmp69 = tmp64 + tmp68
tmp70 = tmp69 - tmp58
tmp72 = tmp70 * tmp71
tmp73 = tmp58 + tmp72
tmp75 = tl.full([XBLOCK], 12, tl.int32)
tmp76 = tmp74 + tmp75
tmp77 = tmp74 < 0
tmp78 = tl.where(tmp77, tmp76, tmp74)
tmp80 = tmp79 + tmp75
tmp81 = tmp79 < 0
tmp82 = tl.where(tmp81, tmp80, tmp79)
tmp83 = tl.load(in_ptr17 + (tmp82 + (12*tmp78) + (144*x2)), None, eviction_policy='evict_last')
tmp84 = tmp83 + tmp11
tmp86 = tmp85 + tmp75
tmp87 = tmp85 < 0
tmp88 = tl.where(tmp87, tmp86, tmp85)
tmp89 = tl.load(in_ptr17 + (tmp88 + (12*tmp78) + (144*x2)), None, eviction_policy='evict_last')
tmp90 = tmp89 + tmp11
tmp91 = tmp90 - tmp84
tmp93 = tmp91 * tmp92
tmp94 = tmp84 + tmp93
tmp96 = tmp95 + tmp75
tmp97 = tmp95 < 0
tmp98 = tl.where(tmp97, tmp96, tmp95)
tmp99 = tl.load(in_ptr17 + (tmp82 + (12*tmp98) + (144*x2)), None, eviction_policy='evict_last')
tmp100 = tmp99 + tmp11
tmp101 = tl.load(in_ptr17 + (tmp88 + (12*tmp98) + (144*x2)), None, eviction_policy='evict_last')
tmp102 = tmp101 + tmp11
tmp103 = tmp102 - tmp100
tmp104 = tmp103 * tmp92
tmp105 = tmp100 + tmp104
tmp106 = tmp105 - tmp94
tmp108 = tmp106 * tmp107
tmp109 = tmp94 + tmp108
tmp111 = tl.full([XBLOCK], 10, tl.int32)
tmp112 = tmp110 + tmp111
tmp113 = tmp110 < 0
tmp114 = tl.where(tmp113, tmp112, tmp110)
tmp116 = tmp115 + tmp111
tmp117 = tmp115 < 0
tmp118 = tl.where(tmp117, tmp116, tmp115)
tmp119 = tl.load(in_ptr24 + (tmp118 + (10*tmp114) + (100*x2)), None, eviction_policy='evict_last')
tmp120 = tmp119 + tmp11
tmp122 = tmp121 + tmp111
tmp123 = tmp121 < 0
tmp124 = tl.where(tmp123, tmp122, tmp121)
tmp125 = tl.load(in_ptr24 + (tmp124 + (10*tmp114) + (100*x2)), None, eviction_policy='evict_last')
tmp126 = tmp125 + tmp11
tmp127 = tmp126 - tmp120
tmp129 = tmp127 * tmp128
tmp130 = tmp120 + tmp129
tmp132 = tmp131 + tmp111
tmp133 = tmp131 < 0
tmp134 = tl.where(tmp133, tmp132, tmp131)
tmp135 = tl.load(in_ptr24 + (tmp118 + (10*tmp134) + (100*x2)), None, eviction_policy='evict_last')
tmp136 = tmp135 + tmp11
tmp137 = tl.load(in_ptr24 + (tmp124 + (10*tmp134) + (100*x2)), None, eviction_policy='evict_last')
tmp138 = tmp137 + tmp11
tmp139 = tmp138 - tmp136
tmp140 = tmp139 * tmp128
tmp141 = tmp136 + tmp140
tmp142 = tmp141 - tmp130
tmp144 = tmp142 * tmp143
tmp145 = tmp130 + tmp144
tl.store(in_out_ptr0 + (x3), tmp37, None)
tl.store(in_out_ptr1 + (x3), tmp73, None)
tl.store(in_out_ptr2 + (x3), tmp109, None)
tl.store(in_out_ptr3 + (x3), tmp145, None)
''', device_str='cuda')
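# Reference formulation of the interpolation fused above (a sketch of
# the same four-neighbour/lerp scheme, not the generated code): blend
# the left/right neighbours with the horizontal weight, then blend the
# two row results with the vertical weight. y0/y1/x0/x1 are the clamped
# neighbour index vectors, wx/wy the fractional weights, img an
# (N, C, H, W) tensor.
def _bilinear_reference(img, y0, y1, x0, x1, wx, wy):
    top = img[..., y0[:, None], x0]
    top = top + (img[..., y0[:, None], x1] - top) * wx
    bot = img[..., y1[:, None], x0]
    bot = bot + (img[..., y1[:, None], x1] - bot) * wx
    return top + (bot - top) * wy[:, None]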
# kernel path: runs/run_shard_9/inductor_cache/ld/cld3befcx6mrznygcnfhl7k57tcgfua7ztzqqou5wkquttfw6ztp.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_6, %add_13, %add_20, %add_27, %primals_1], 1), kwargs = {})
triton_poi_fused_cat_16 = async_compile.triton('triton_poi_fused_cat_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 8
x0 = xindex % 4096
x2 = (xindex // 32768)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x2)), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4096*x2)), tmp9, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4096*x2)), tmp14, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr3 + (x0 + (4096*x2)), tmp19, eviction_policy='evict_last', other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 8, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tl.load(in_ptr4 + (x0 + (4096*((-4) + x1)) + (16384*x2)), tmp21, other=0.0)
tmp25 = tl.where(tmp19, tmp20, tmp24)
tmp26 = tl.where(tmp14, tmp15, tmp25)
tmp27 = tl.where(tmp9, tmp10, tmp26)
tmp28 = tl.where(tmp4, tmp5, tmp27)
tl.store(out_ptr0 + (x3), tmp28, None)
''', device_str='cuda')
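# Layout sanity sketch for the concatenation above (illustrative only):
# output channels 0-3 hold the four upsampled single-channel branches,
# channels 4-7 the original 4-channel input.
def _cat_layout_sketch():
    import torch
    branches = [torch.zeros(4, 1, 64, 64) for _ in range(4)]
    x = torch.zeros(4, 4, 64, 64)
    out = torch.cat(branches + [x], dim=1)
    assert out.shape == (4, 8, 64, 64)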
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(primals_1, buf0, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 32, 32), (1024, 1024, 32, 1))
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_1.run(buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_2.run(buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_1.run(buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_2.run(buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3.run(buf6, 64, grid=grid(64), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 21, 21), (1792, 441, 21, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_4.run(primals_1, buf11, 7056, grid=grid(7056), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 21, 21), (441, 441, 21, 1))
buf13 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [upsample_1], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_5.run(buf13, 64, grid=grid(64), stream=stream0)
buf14 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [upsample_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_6.run(buf14, 64, grid=grid(64), stream=stream0)
buf15 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [upsample, upsample_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_5.run(buf15, 64, grid=grid(64), stream=stream0)
buf16 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [upsample_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_6.run(buf16, 64, grid=grid(64), stream=stream0)
buf17 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [upsample, upsample_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7.run(buf17, 64, grid=grid(64), stream=stream0)
buf19 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [upsample_1], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7.run(buf19, 64, grid=grid(64), stream=stream0)
buf22 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(primals_1, buf22, 2304, grid=grid(2304), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf22, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 1, 12, 12), (144, 144, 12, 1))
buf24 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [upsample_2], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_9.run(buf24, 64, grid=grid(64), stream=stream0)
buf25 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [upsample_2], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_10.run(buf25, 64, grid=grid(64), stream=stream0)
buf26 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [upsample, upsample_2], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_9.run(buf26, 64, grid=grid(64), stream=stream0)
buf27 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [upsample_2], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_10.run(buf27, 64, grid=grid(64), stream=stream0)
buf28 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [upsample, upsample_2], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11.run(buf28, 64, grid=grid(64), stream=stream0)
buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [upsample_2], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11.run(buf30, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [max_pool2d_3], Original ATen: [aten.max_pool2d_with_indices]
buf33 = torch.ops.aten.max_pool2d_with_indices.default(primals_1, [6, 6], [6, 6])
buf34 = buf33[0]
del buf33
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf34, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 1, 10, 10), (100, 100, 10, 1))
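        # The four branch resolutions (32, 21, 12, 10) all follow from
        # floor((64 - k) / s) + 1 with pooling configs (k, s) of (2, 2),
        # (3, 3), (5, 5) and (6, 6) respectively.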
buf37 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [upsample_3], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_12.run(buf37, 64, grid=grid(64), stream=stream0)
buf38 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [upsample_3], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_13.run(buf38, 64, grid=grid(64), stream=stream0)
buf39 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [upsample, upsample_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_12.run(buf39, 64, grid=grid(64), stream=stream0)
buf40 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [upsample_3], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_13.run(buf40, 64, grid=grid(64), stream=stream0)
buf41 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [upsample, upsample_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14.run(buf41, 64, grid=grid(64), stream=stream0)
buf43 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [upsample_3], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14.run(buf43, 64, grid=grid(64), stream=stream0)
buf8 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [upsample], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3.run(buf8, 64, grid=grid(64), stream=stream0)
buf9 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32)
buf10 = reinterpret_tensor(buf9, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf9 # reuse
buf20 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32)
buf21 = reinterpret_tensor(buf20, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf20 # reuse
buf31 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32)
buf32 = reinterpret_tensor(buf31, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf31 # reuse
buf44 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32)
buf45 = reinterpret_tensor(buf44, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf44 # reuse
# Topologically Sorted Source Nodes: [conv2d, upsample, conv2d_1, upsample_1, conv2d_2, upsample_2, conv2d_3, upsample_3], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_mul_sub_15.run(buf10, buf21, buf32, buf45, buf2, buf4, buf1, primals_3, buf5, buf6, buf3, buf8, buf13, buf15, buf12, buf16, buf17, buf14, buf19, buf24, buf26, buf23, buf27, buf28, buf25, buf30, buf37, buf39, buf36, buf40, buf41, buf38, buf43, 16384, grid=grid(16384), stream=stream0)
del buf1
del buf12
del buf23
del buf36
del primals_3
buf46 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
triton_poi_fused_cat_16.run(buf10, buf21, buf32, buf45, primals_1, buf46, 131072, grid=grid(131072), stream=stream0)
del primals_1
return (buf46, buf45, buf32, buf21, buf10, primals_2, buf0, buf2, buf3, buf4, buf5, buf6, buf8, buf11, buf13, buf14, buf15, buf16, buf17, buf19, buf22, buf24, buf25, buf26, buf27, buf28, buf30, buf34, buf37, buf38, buf39, buf40, buf41, buf43, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class SPPblock(nn.Module):
    """Spatial pyramid pooling: four max-pool branches, each reduced to a
    single channel by a shared 1x1 conv, bilinearly upsampled back to the
    input resolution, then concatenated with the input."""

    def __init__(self, in_channels):
        super(SPPblock, self).__init__()
        self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2)
        self.pool2 = nn.MaxPool2d(kernel_size=[3, 3], stride=3)
        self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5)
        self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6)
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=1,
            kernel_size=1, padding=0)

    def forward(self, x):
        self.in_channels, h, w = x.size(1), x.size(2), x.size(3)
        # F.interpolate replaces the deprecated F.upsample alias;
        # align_corners=False is the default the alias resolved to and
        # matches the (x + 0.5) * scale - 0.5 index math in the kernels.
        self.layer1 = F.interpolate(self.conv(self.pool1(x)), size=(h, w),
            mode='bilinear', align_corners=False)
        self.layer2 = F.interpolate(self.conv(self.pool2(x)), size=(h, w),
            mode='bilinear', align_corners=False)
        self.layer3 = F.interpolate(self.conv(self.pool3(x)), size=(h, w),
            mode='bilinear', align_corners=False)
        self.layer4 = F.interpolate(self.conv(self.pool4(x)), size=(h, w),
            mode='bilinear', align_corners=False)
        out = torch.cat([self.layer1, self.layer2, self.layer3, self.layer4,
            x], 1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {'in_channels': 4}]
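# Minimal eager-mode usage sketch for the module above (illustrative
# only; the name _spp_demo is not part of the original source):
def _spp_demo():
    block = SPPblock(**get_init_inputs()[1])
    x, = get_inputs()
    out = block(x)
    assert out.shape == (4, 8, 64, 64)  # 4 branch channels + 4 input channels
    return out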
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x2, tmp6, None)
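# Index note (sketch): with a 64-wide input the row stride is 64, so the
# 2x2 / stride-2 window at output (x1, x0) reads offsets {0, 1, 64, 65}
# from base 2 * x0 + 128 * x1; the running maximum above reduces those
# four taps, and the indices output of max_pool2d_with_indices is never
# materialised.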
@triton.jit
def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 7056
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 21
x1 = xindex // 21 % 21
x4 = xindex // 441
x3 = xindex // 1764
x5 = xindex % 1764
tmp0 = tl.load(in_ptr0 + (3 * x0 + 192 * x1 + 4096 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 192 * x1 + 4096 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 192 * x1 + 4096 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (64 + 3 * x0 + 192 * x1 + 4096 * x4), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (65 + 3 * x0 + 192 * x1 + 4096 * x4), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (66 + 3 * x0 + 192 * x1 + 4096 * x4), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (128 + 3 * x0 + 192 * x1 + 4096 * x4), xmask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (129 + 3 * x0 + 192 * x1 + 4096 * x4), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (130 + 3 * x0 + 192 * x1 + 4096 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + (x5 + 1792 * x3), tmp16, xmask)
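# The 21x21 branch is written with a padded batch stride of 1792 rather
# than the contiguous 4 * 441 = 1764, matching the (1792, 441, 21, 1)
# strides the wrapper allocates for this buffer (hence the
# x5 + 1792 * x3 store address above).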
@triton.jit
def triton_poi_fused__to_copy_5(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.328125
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_6(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.328125
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 20, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.328125
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12 % 12
x2 = xindex // 144
x3 = xindex
tmp0 = tl.load(in_ptr0 + (5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (64 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (65 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (66 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (67 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (68 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (128 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (129 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (130 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (131 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (132 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (192 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (193 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (194 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (195 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (196 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (256 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (257 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (258 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (259 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (260 + 5 * x0 + 320 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp32 = triton_helpers.maximum(tmp31, tmp30)
tmp34 = triton_helpers.maximum(tmp33, tmp32)
tmp36 = triton_helpers.maximum(tmp35, tmp34)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp40 = triton_helpers.maximum(tmp39, tmp38)
tmp42 = triton_helpers.maximum(tmp41, tmp40)
tmp44 = triton_helpers.maximum(tmp43, tmp42)
tmp46 = triton_helpers.maximum(tmp45, tmp44)
tmp48 = triton_helpers.maximum(tmp47, tmp46)
tl.store(out_ptr0 + x3, tmp48, xmask)
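# Same reduction pattern as the other pooling kernels, here over the 25
# taps of a 5x5 / stride-5 window; xnumel = 2304 covers the full
# (4, 4, 12, 12) output.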
@triton.jit
def triton_poi_fused__to_copy_9(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.1875
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_10(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.1875
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 11, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.1875
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.15625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.15625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 9, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.15625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
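# Editorial note (assumption, not part of the Inductor output): the _to_copy /
# add_clamp / sub kernels above all evaluate the align_corners=False source
# coordinate for bilinear upsampling to 64x64; the scale constants are
# source/destination ratios (0.1875 = 12/64, 0.15625 = 10/64). A hypothetical
# scalar restatement of the shared tmp0..tmp8 arithmetic:
def _bilinear_source_coord(dst_index, scale):
    return max((dst_index + 0.5) * scale - 0.5, 0.0)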
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_15(in_out_ptr0,
in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17,
in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24,
in_ptr25, in_ptr26, in_ptr27, in_ptr28, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x2 = xindex // 4096
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + 0)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr11 + x0, None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr12 + x0, None, eviction_policy='evict_last')
tmp59 = tl.load(in_ptr13 + x1, None, eviction_policy='evict_last')
tmp71 = tl.load(in_ptr14 + x1, None, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr15 + x1, None, eviction_policy='evict_last')
tmp79 = tl.load(in_ptr16 + x0, None, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr18 + x0, None, eviction_policy='evict_last')
tmp92 = tl.load(in_ptr19 + x0, None, eviction_policy='evict_last')
tmp95 = tl.load(in_ptr20 + x1, None, eviction_policy='evict_last')
tmp107 = tl.load(in_ptr21 + x1, None, eviction_policy='evict_last')
tmp110 = tl.load(in_ptr22 + x1, None, eviction_policy='evict_last')
tmp115 = tl.load(in_ptr23 + x0, None, eviction_policy='evict_last')
tmp121 = tl.load(in_ptr25 + x0, None, eviction_policy='evict_last')
tmp128 = tl.load(in_ptr26 + x0, None, eviction_policy='evict_last')
tmp131 = tl.load(in_ptr27 + x1, None, eviction_policy='evict_last')
tmp143 = tl.load(in_ptr28 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tmp12 = tmp9 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = tmp13 < 0
tmp16 = tl.where(tmp15, tmp14, tmp13)
tmp17 = tl.load(in_ptr2 + (tmp16 + 32 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tmp18 = tmp17 + tmp11
tmp19 = tmp18 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp12 + tmp21
tmp24 = tmp23 + tmp1
tmp25 = tmp23 < 0
tmp26 = tl.where(tmp25, tmp24, tmp23)
tmp27 = tl.load(in_ptr2 + (tmp8 + 32 * tmp26 + 1024 * x2), None,
eviction_policy='evict_last')
tmp28 = tmp27 + tmp11
tmp29 = tl.load(in_ptr2 + (tmp16 + 32 * tmp26 + 1024 * x2), None,
eviction_policy='evict_last')
tmp30 = tmp29 + tmp11
tmp31 = tmp30 - tmp28
tmp32 = tmp31 * tmp20
tmp33 = tmp28 + tmp32
tmp34 = tmp33 - tmp22
tmp36 = tmp34 * tmp35
tmp37 = tmp22 + tmp36
tmp39 = tl.full([XBLOCK], 21, tl.int32)
tmp40 = tmp38 + tmp39
tmp41 = tmp38 < 0
tmp42 = tl.where(tmp41, tmp40, tmp38)
tmp44 = tmp43 + tmp39
tmp45 = tmp43 < 0
tmp46 = tl.where(tmp45, tmp44, tmp43)
tmp47 = tl.load(in_ptr10 + (tmp46 + 21 * tmp42 + 441 * x2), None,
eviction_policy='evict_last')
tmp48 = tmp47 + tmp11
tmp50 = tmp49 + tmp39
tmp51 = tmp49 < 0
tmp52 = tl.where(tmp51, tmp50, tmp49)
tmp53 = tl.load(in_ptr10 + (tmp52 + 21 * tmp42 + 441 * x2), None,
eviction_policy='evict_last')
tmp54 = tmp53 + tmp11
tmp55 = tmp54 - tmp48
tmp57 = tmp55 * tmp56
tmp58 = tmp48 + tmp57
tmp60 = tmp59 + tmp39
tmp61 = tmp59 < 0
tmp62 = tl.where(tmp61, tmp60, tmp59)
tmp63 = tl.load(in_ptr10 + (tmp46 + 21 * tmp62 + 441 * x2), None,
eviction_policy='evict_last')
tmp64 = tmp63 + tmp11
tmp65 = tl.load(in_ptr10 + (tmp52 + 21 * tmp62 + 441 * x2), None,
eviction_policy='evict_last')
tmp66 = tmp65 + tmp11
tmp67 = tmp66 - tmp64
tmp68 = tmp67 * tmp56
tmp69 = tmp64 + tmp68
tmp70 = tmp69 - tmp58
tmp72 = tmp70 * tmp71
tmp73 = tmp58 + tmp72
tmp75 = tl.full([XBLOCK], 12, tl.int32)
tmp76 = tmp74 + tmp75
tmp77 = tmp74 < 0
tmp78 = tl.where(tmp77, tmp76, tmp74)
tmp80 = tmp79 + tmp75
tmp81 = tmp79 < 0
tmp82 = tl.where(tmp81, tmp80, tmp79)
tmp83 = tl.load(in_ptr17 + (tmp82 + 12 * tmp78 + 144 * x2), None,
eviction_policy='evict_last')
tmp84 = tmp83 + tmp11
tmp86 = tmp85 + tmp75
tmp87 = tmp85 < 0
tmp88 = tl.where(tmp87, tmp86, tmp85)
tmp89 = tl.load(in_ptr17 + (tmp88 + 12 * tmp78 + 144 * x2), None,
eviction_policy='evict_last')
tmp90 = tmp89 + tmp11
tmp91 = tmp90 - tmp84
tmp93 = tmp91 * tmp92
tmp94 = tmp84 + tmp93
tmp96 = tmp95 + tmp75
tmp97 = tmp95 < 0
tmp98 = tl.where(tmp97, tmp96, tmp95)
tmp99 = tl.load(in_ptr17 + (tmp82 + 12 * tmp98 + 144 * x2), None,
eviction_policy='evict_last')
tmp100 = tmp99 + tmp11
tmp101 = tl.load(in_ptr17 + (tmp88 + 12 * tmp98 + 144 * x2), None,
eviction_policy='evict_last')
tmp102 = tmp101 + tmp11
tmp103 = tmp102 - tmp100
tmp104 = tmp103 * tmp92
tmp105 = tmp100 + tmp104
tmp106 = tmp105 - tmp94
tmp108 = tmp106 * tmp107
tmp109 = tmp94 + tmp108
tmp111 = tl.full([XBLOCK], 10, tl.int32)
tmp112 = tmp110 + tmp111
tmp113 = tmp110 < 0
tmp114 = tl.where(tmp113, tmp112, tmp110)
tmp116 = tmp115 + tmp111
tmp117 = tmp115 < 0
tmp118 = tl.where(tmp117, tmp116, tmp115)
tmp119 = tl.load(in_ptr24 + (tmp118 + 10 * tmp114 + 100 * x2), None,
eviction_policy='evict_last')
tmp120 = tmp119 + tmp11
tmp122 = tmp121 + tmp111
tmp123 = tmp121 < 0
tmp124 = tl.where(tmp123, tmp122, tmp121)
tmp125 = tl.load(in_ptr24 + (tmp124 + 10 * tmp114 + 100 * x2), None,
eviction_policy='evict_last')
tmp126 = tmp125 + tmp11
tmp127 = tmp126 - tmp120
tmp129 = tmp127 * tmp128
tmp130 = tmp120 + tmp129
tmp132 = tmp131 + tmp111
tmp133 = tmp131 < 0
tmp134 = tl.where(tmp133, tmp132, tmp131)
tmp135 = tl.load(in_ptr24 + (tmp118 + 10 * tmp134 + 100 * x2), None,
eviction_policy='evict_last')
tmp136 = tmp135 + tmp11
tmp137 = tl.load(in_ptr24 + (tmp124 + 10 * tmp134 + 100 * x2), None,
eviction_policy='evict_last')
tmp138 = tmp137 + tmp11
tmp139 = tmp138 - tmp136
tmp140 = tmp139 * tmp128
tmp141 = tmp136 + tmp140
tmp142 = tmp141 - tmp130
tmp144 = tmp142 * tmp143
tmp145 = tmp130 + tmp144
tl.store(in_out_ptr0 + x3, tmp37, None)
tl.store(in_out_ptr1 + x3, tmp73, None)
tl.store(in_out_ptr2 + x3, tmp109, None)
tl.store(in_out_ptr3 + x3, tmp145, None)
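# Editorial note (assumption, not part of the Inductor output): the kernel
# above fuses bilinear upsampling of the four pooled branches (32x32, 21x21,
# 12x12 and 10x10) to 64x64 in one pass. Each branch repeats the same
# interpolation pattern, restated here as a hypothetical scalar helper:
def _bilerp_reference(v00, v01, v10, v11, wx, wy):
    # two horizontal lerps followed by one vertical lerp, as in tmp12..tmp37
    top = v00 + (v01 - v00) * wx
    bottom = v10 + (v11 - v10) * wx
    return top + (bottom - top) * wy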
@triton.jit
def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 8
x0 = xindex % 4096
x2 = xindex // 32768
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x2), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4096 * x2), tmp9, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp14, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr3 + (x0 + 4096 * x2), tmp19, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp0 >= tmp17
tl.full([1], 8, tl.int64)
tmp24 = tl.load(in_ptr4 + (x0 + 4096 * (-4 + x1) + 16384 * x2), tmp21,
other=0.0)
tmp25 = tl.where(tmp19, tmp20, tmp24)
tmp26 = tl.where(tmp14, tmp15, tmp25)
tmp27 = tl.where(tmp9, tmp10, tmp26)
tmp28 = tl.where(tmp4, tmp5, tmp27)
tl.store(out_ptr0 + x3, tmp28, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16384)](primals_1,
buf0, 16384, XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 32, 32), (1024, 1024, 32, 1))
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_1[grid(64)](buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_2[grid(64)](buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_1[grid(64)](buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_2[grid(64)](buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(64)](buf6,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 21, 21), (1792, 441, 21, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_4[grid(7056)](primals_1,
buf11, 7056, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 21, 21), (441, 441, 21, 1))
buf13 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_5[grid(64)](buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_6[grid(64)](buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_5[grid(64)](buf15, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_6[grid(64)](buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7[grid(64)](buf17,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7[grid(64)](buf19,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf22 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch
.float32)
triton_poi_fused_max_pool2d_with_indices_8[grid(2304)](primals_1,
buf22, 2304, XBLOCK=128, num_warps=4, num_stages=1)
buf23 = extern_kernels.convolution(buf22, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 1, 12, 12), (144, 144, 12, 1))
buf24 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_9[grid(64)](buf24, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_10[grid(64)](buf25, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf26 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_9[grid(64)](buf26, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_10[grid(64)](buf27, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11[grid(64)](buf28,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11[grid(64)](buf30,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf33 = torch.ops.aten.max_pool2d_with_indices.default(primals_1, [
6, 6], [6, 6])
buf34 = buf33[0]
del buf33
buf36 = extern_kernels.convolution(buf34, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 1, 10, 10), (100, 100, 10, 1))
buf37 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_12[grid(64)](buf37, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf38 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_13[grid(64)](buf38, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf39 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_12[grid(64)](buf39, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf40 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_13[grid(64)](buf40, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf41 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(64)](buf41,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf43 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(64)](buf43,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(64)](buf8,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
buf10 = reinterpret_tensor(buf9, (4, 1, 64, 64), (4096, 4096, 64, 1), 0
)
del buf9
buf20 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
buf21 = reinterpret_tensor(buf20, (4, 1, 64, 64), (4096, 4096, 64,
1), 0)
del buf20
buf31 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
buf32 = reinterpret_tensor(buf31, (4, 1, 64, 64), (4096, 4096, 64,
1), 0)
del buf31
buf44 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
buf45 = reinterpret_tensor(buf44, (4, 1, 64, 64), (4096, 4096, 64,
1), 0)
del buf44
triton_poi_fused__unsafe_index_add_convolution_mul_sub_15[grid(16384)](
buf10, buf21, buf32, buf45, buf2, buf4, buf1, primals_3, buf5,
buf6, buf3, buf8, buf13, buf15, buf12, buf16, buf17, buf14,
buf19, buf24, buf26, buf23, buf27, buf28, buf25, buf30, buf37,
buf39, buf36, buf40, buf41, buf38, buf43, 16384, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
del buf12
del buf23
del buf36
del primals_3
buf46 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_16[grid(131072)](buf10, buf21, buf32, buf45,
primals_1, buf46, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_1
return (buf46, buf45, buf32, buf21, buf10, primals_2, buf0, buf2, buf3,
buf4, buf5, buf6, buf8, buf11, buf13, buf14, buf15, buf16, buf17,
buf19, buf22, buf24, buf25, buf26, buf27, buf28, buf30, buf34,
buf37, buf38, buf39, buf40, buf41, buf43)
class SPPblockNew(nn.Module):
def __init__(self, in_channels):
super(SPPblockNew, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2)
self.pool2 = nn.MaxPool2d(kernel_size=[3, 3], stride=3)
self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5)
self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6)
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=1,
kernel_size=1, padding=0)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
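# Editorial usage sketch (assumption, mirroring the size guards in call(), and
# assuming torch is imported at the top of this module): the compiled block
# expects a (4, 4, 64, 64) CUDA tensor and returns the (4, 8, 64, 64)
# concatenation of the four upsampled branches with the input.
def _spp_demo():
    import torch
    model = SPPblockNew(in_channels=4).cuda()
    x = torch.rand(4, 4, 64, 64, device='cuda')
    assert model(x).shape == (4, 8, 64, 64)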
| zxg3017/CUSE-Net | SPPblock | false | 13,207 | ["MIT"] | 0 | ea1d07027f89130a8a40465de94528f23eb9f5d1 | https://github.com/zxg3017/CUSE-Net/tree/ea1d07027f89130a8a40465de94528f23eb9f5d1 |
SoftMaxAvgPoolModel | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/v4/cv4nyn2kde7dd2c53ddahw4vtxyldln6pqt62jrliqindkf3sj5m.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
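# Editorial sketch (assumption, not part of the Inductor output): the two
# kernels above split softmax over dim=1 into the standard numerically stable
# two-pass form, exp(x - max) followed by normalization by the per-position sum.
def _softmax_reference(x):
    e = (x - x.amax(dim=1, keepdim=True)).exp()
    return e / e.sum(dim=1, keepdim=True)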
# kernel path: runs/run_shard_9/inductor_cache/lj/cljk62ivqbhrektss5kobld3fxma7fnybhu4xiqst4whgfairhqj.py
# Topologically Sorted Source Nodes: [x, avg_pool2d], Original ATen: [aten._softmax, aten.avg_pool2d]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# x => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%div, [3, 3], [3, 3]), kwargs = {})
triton_poi_fused__softmax_avg_pool2d_2 = async_compile.triton('triton_poi_fused__softmax_avg_pool2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_avg_pool2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + (x0), tmp18, xmask)
''', device_str='cuda')
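# Editorial sanity check (assumption, not part of the Inductor output): a
# 3x3/stride-3 AvgPool2d over a 4x4 plane keeps only the top-left window, which
# is exactly the nine loads (offsets 0,1,2, 4,5,6, 8,9,10 of each 16-element
# plane) and the 1/9 factor above.
def _check_avgpool_window():
    x = torch.arange(16.0).reshape(1, 1, 4, 4)
    expected = x[..., :3, :3].mean(dim=(-1, -2), keepdim=True)
    assert torch.allclose(torch.nn.AvgPool2d(3)(x), expected)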
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, avg_pool2d], Original ATen: [aten._softmax, aten.avg_pool2d]
triton_poi_fused__softmax_avg_pool2d_2.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.cuda
import torch.nn
import torch.utils.data
import torch.fx
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
class SoftMaxAvgPoolModel(torch.nn.Module):
def __init__(self):
super(SoftMaxAvgPoolModel, self).__init__()
self.sfmax = torch.nn.Softmax(dim=1)
self.avgpool = torch.nn.AvgPool2d(3)
def forward(self, inp):
x = self.sfmax(inp)
return self.avgpool(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
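# Editorial demo (assumption, not part of the reference code): a quick CPU
# check that softmax over dim=1 followed by AvgPool2d(3) maps the (4, 4, 4, 4)
# input from get_inputs() to a (4, 4, 1, 1) output.
def _demo():
    out = SoftMaxAvgPoolModel()(get_inputs()[0])
    assert out.shape == (4, 4, 1, 1)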
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.cuda
import torch.nn
import torch.utils.data
import torch.fx
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_avg_pool2d_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + x0, tmp18, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused__softmax_avg_pool2d_2[grid(16)](buf1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf1
return buf2,
class SoftMaxAvgPoolModelNew(torch.nn.Module):
def __init__(self):
super(SoftMaxAvgPoolModelNew, self).__init__()
self.sfmax = torch.nn.Softmax(dim=1)
self.avgpool = torch.nn.AvgPool2d(3)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
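# Editorial sanity check (assumption: a CUDA device is available; this helper
# is not part of the generated wrapper): the compiled module should match the
# eager softmax + avg-pool pipeline on the guarded input shape.
def _check_against_eager():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = torch.nn.AvgPool2d(3)(torch.nn.Softmax(dim=1)(x))
    assert torch.allclose(SoftMaxAvgPoolModelNew()(x), eager, atol=1e-06)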
| quic-kyunggeu/aimet | SoftMaxAvgPoolModel | false | 13,208 | ["BSD-3-Clause"] | 0 | 877835d5aafcef17cf12864124977d3c128d4aca | https://github.com/quic-kyunggeu/aimet/tree/877835d5aafcef17cf12864124977d3c128d4aca |
MIRB2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/34/c34lnuw3wok4euydis4jz2cumdv5zl53hr2km2mr6sokjcomm2j6.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused__weight_norm_interface_0 = async_compile.triton('triton_poi_fused__weight_norm_interface_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (6*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (6*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (6*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (6*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4 + (6*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (5 + (6*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tl.store(out_ptr0 + (x0), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4h/c4hyivh4rlcg5ap7pj7xbbqscs2u2jjb4g5w4jaidlc7wpyz7lbi.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
triton_poi_fused__weight_norm_interface_1 = async_compile.triton('triton_poi_fused__weight_norm_interface_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 6)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
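# Editorial sketch (assumption, not part of the Inductor output): the two
# kernels above, like the fused persistent-reduction variants further below,
# implement weight_norm's reparameterization w = v * g / ||v||, with the norm
# taken per output channel over dims (1, 2, 3).
def _weight_norm_reference(v, g):
    norm = v.pow(2).sum(dim=(1, 2, 3), keepdim=True).pow(0.5)
    return v * (g / norm)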
# kernel path: runs/run_shard_9/inductor_cache/ij/cijtnciymslu5jmlqs4skpm4lp2nibv5hh3th6b5i2szhhqwwhmh.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %mul, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 3), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 18
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/mz/cmzalccs4mduopsp2yd4zky3wwaeepvbubqparxslix3sntpfkub.py
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_1 => div_1, mul_1, pow_3, pow_4, sum_2
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_6, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1, 2, 3], True), kwargs = {})
# %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_5, %pow_4), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %div_1), kwargs = {})
triton_per_fused__weight_norm_interface_3 = async_compile.triton('triton_per_fused__weight_norm_interface_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 18
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (9*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (9*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/dy/cdylggyn2ws3sfvdukozllsfp2dvd7jch6eitsmkngzuzggssgxt.py
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_2 => div_2, mul_2, pow_5, pow_6, sum_3
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_9, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1, 2, 3], True), kwargs = {})
# %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_8, %pow_6), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, %div_2), kwargs = {})
triton_per_fused__weight_norm_interface_4 = async_compile.triton('triton_per_fused__weight_norm_interface_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 9
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (18*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (18*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4r/c4rv2lnvemalwfmwemwkkjuqab3eovephhf3dkwsqaqrzbzurn6e.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 18
x0 = xindex % 4096
x2 = (xindex // 73728)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (36864*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 18, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tl.load(in_ptr2 + (x0 + (4096*((-9) + x1)) + (36864*x2)), tmp15, other=0.0)
tmp19 = tl.load(in_ptr3 + ((-9) + x1), tmp15, eviction_policy='evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tmp20 > tmp8
tmp22 = tmp20 * tmp10
tmp23 = tl.where(tmp21, tmp20, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tl.store(out_ptr0 + (x3), tmp26, None)
''', device_str='cuda')
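# Editorial sketch (assumption, hypothetical helper with the convolution
# biases already folded into a and b): the branchy loads above fuse the bias
# add, LeakyReLU(negative_slope=0.2) and the channel-wise concatenation of the
# two 9-channel branches into one pass.
def _cat_leaky_reference(a, b):
    import torch
    lrelu = torch.nn.functional.leaky_relu
    return torch.cat([lrelu(a, 0.2), lrelu(b, 0.2)], dim=1)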
# kernel path: runs/run_shard_9/inductor_cache/nt/cntzp5nakwvul7tcparxahaerbizrgojsic6at2feukyiwjvutbv.py
# Topologically Sorted Source Nodes: [_weight_norm_18], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_18 => div_18, mul_24, pow_37, pow_38, sum_19
# Graph fragment:
# %pow_37 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_57, 2), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_37, [1, 2, 3], True), kwargs = {})
# %pow_38 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_19, 0.5), kwargs = {})
# %div_18 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_56, %pow_38), kwargs = {})
# %mul_24 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_57, %div_18), kwargs = {})
triton_per_fused__weight_norm_interface_6 = async_compile.triton('triton_per_fused__weight_norm_interface_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 18
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (18*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (18*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
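# Editor's note (a reference sketch, not part of the generated graph): every
# _weight_norm_interface kernel in this file computes the standard weight
# normalization w = g * v / ||v||_2 with the norm taken per output channel,
# matching the graph fragment above (pow -> sum over [1, 2, 3] -> sqrt ->
# div -> mul). A minimal eager-mode check, helper name ours:
def _weight_norm_reference(v, g):
    # L2 norm over every non-output dim, kept broadcastable against v
    norm = v.pow(2).sum(dim=(1, 2, 3), keepdim=True).sqrt()
    return g * (v / norm)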
# kernel path: runs/run_shard_9/inductor_cache/yr/cyrzws2lnjde2as7gzejnwjcj4lymm7bgicdqzp4zy6p5j7qtkpv.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out => convolution_18
# out_1 => add
# Graph fragment:
# %convolution_18 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_4, %mul_24, %primals_58, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_18, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_7 = async_compile.triton('triton_poi_fused_add_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 18
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
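# Editor's note (illustrative only): triton_poi_fused_add_convolution_7 folds
# the conv bias and the residual connection into one elementwise pass, i.e.
# out = conv_out + bias + x. Eager-mode equivalent, helper name ours:
def _residual_bias_reference(conv_out, bias, residual):
    # bias has shape (18,); broadcast it over N, H, W
    return conv_out + bias.view(1, -1, 1, 1) + residual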
# kernel path: runs/run_shard_9/inductor_cache/al/callo7bieu2ffgjrumilpfnh66yj2nuuap5g7jnjsb3x7g3p5ot7.py
# Topologically Sorted Source Nodes: [x_17, c2_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# c2_4 => gt_5, mul_23, where_5
# x_17 => convolution_17
# Graph fragment:
# %convolution_17 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_16, %mul_22, %primals_55, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_17, 0), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_17, 0.2), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_17, %mul_23), kwargs = {})
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_5, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 9
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
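# Editor's note (illustrative only): the kernel above recomputes bias-add +
# LeakyReLU(0.2) but stores only the boolean mask (activation > 0) that
# leaky_relu_backward consumes, so the full activation never has to be kept.
# Eager-mode sketch, helper name ours:
def _leaky_relu_mask_reference(conv_out, bias):
    import torch.nn.functional as F
    y = F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), negative_slope=0.2)
    return y > 0  # bool tensor saved for the backward pass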
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58 = args
args.clear()
assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1))
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_4, (18, ), (1, ))
assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (18, ), (1, ))
assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_10, (9, ), (1, ))
assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_13, (18, ), (1, ))
assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_16, (18, ), (1, ))
assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_19, (9, ), (1, ))
assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_22, (18, ), (1, ))
assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_25, (18, ), (1, ))
assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_28, (9, ), (1, ))
assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_31, (18, ), (1, ))
assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_34, (18, ), (1, ))
assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_37, (9, ), (1, ))
assert_size_stride(primals_38, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_40, (18, ), (1, ))
assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_43, (18, ), (1, ))
assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_46, (9, ), (1, ))
assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_49, (18, ), (1, ))
assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_52, (18, ), (1, ))
assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_55, (9, ), (1, ))
assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_58, (18, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
stream0 = get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0.run(primals_3, buf0, 18, grid=grid(18), stream=stream0)
buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_3, primals_2, buf0, buf1, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf3, primals_4, 294912, grid=grid(294912), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf4 # reuse
buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf5, primals_6, primals_5, buf6, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf8, primals_7, 294912, grid=grid(294912), stream=stream0)
del primals_7
buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf9 # reuse
buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf10, primals_9, primals_8, buf11, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_12, buf13, 18, grid=grid(18), stream=stream0)
buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_12, primals_11, buf13, buf14, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf16, primals_13, 294912, grid=grid(294912), stream=stream0)
del primals_13
buf17 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf17 # reuse
buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_4], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf18, primals_15, primals_14, buf19, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf21, primals_16, 294912, grid=grid(294912), stream=stream0)
del primals_16
buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf22 # reuse
buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_5], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf23, primals_18, primals_17, buf24, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf12, primals_10, buf25, primals_19, buf26, 294912, grid=grid(294912), stream=stream0)
buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_6], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_21, buf27, 18, grid=grid(18), stream=stream0)
buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_6], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_21, primals_20, buf27, buf28, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf30 = buf29; del buf29 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf30, primals_22, 294912, grid=grid(294912), stream=stream0)
del primals_22
buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf31 # reuse
buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_7], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf32, primals_24, primals_23, buf33, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf35, primals_25, 294912, grid=grid(294912), stream=stream0)
del primals_25
buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf36 # reuse
buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_8], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf37, primals_27, primals_26, buf38, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_9], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_30, buf40, 18, grid=grid(18), stream=stream0)
buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_9], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_30, primals_29, buf40, buf41, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf26, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf43, primals_31, 294912, grid=grid(294912), stream=stream0)
del primals_31
buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf44 # reuse
buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_10], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf45, primals_33, primals_32, buf46, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf48 = buf47; del buf47 # reuse
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf48, primals_34, 294912, grid=grid(294912), stream=stream0)
del primals_34
buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf49 # reuse
buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_11], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf50, primals_36, primals_35, buf51, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf39, primals_28, buf52, primals_37, buf53, 294912, grid=grid(294912), stream=stream0)
buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_12], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_39, buf54, 18, grid=grid(18), stream=stream0)
buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_12], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_39, primals_38, buf54, buf55, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf57 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf57, primals_40, 294912, grid=grid(294912), stream=stream0)
del primals_40
buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf58 # reuse
buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_13], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf59, primals_42, primals_41, buf60, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf62 = buf61; del buf61 # reuse
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf62, primals_43, 294912, grid=grid(294912), stream=stream0)
del primals_43
buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf63 # reuse
buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_14], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf64, primals_45, primals_44, buf65, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_15], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_48, buf67, 18, grid=grid(18), stream=stream0)
buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_15], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_48, primals_47, buf67, buf68, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf70 = buf69; del buf69 # reuse
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf70, primals_49, 294912, grid=grid(294912), stream=stream0)
del primals_49
buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf71 # reuse
buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_16], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf72, primals_51, primals_50, buf73, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf75 = buf74; del buf74 # reuse
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf75, primals_52, 294912, grid=grid(294912), stream=stream0)
del primals_52
buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf76 # reuse
buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_17], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf77, primals_54, primals_53, buf78, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution]
buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf66, primals_46, buf79, primals_55, buf80, 294912, grid=grid(294912), stream=stream0)
buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf81 # reuse
buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_18], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_6.run(buf82, primals_57, primals_56, buf83, 18, 18, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf85 = buf84; del buf84 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_7.run(buf85, primals_58, primals_1, 294912, grid=grid(294912), stream=stream0)
del primals_58
buf86 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_17, c2_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf79, primals_55, buf86, 147456, grid=grid(147456), stream=stream0)
del buf79
del primals_55
buf87 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_14, c1_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf66, primals_46, buf87, 147456, grid=grid(147456), stream=stream0)
del buf66
del primals_46
buf88 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_11, c2_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf52, primals_37, buf88, 147456, grid=grid(147456), stream=stream0)
del buf52
del primals_37
buf89 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_8, c1_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf39, primals_28, buf89, 147456, grid=grid(147456), stream=stream0)
del buf39
del primals_28
buf90 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5, c2_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf25, primals_19, buf90, 147456, grid=grid(147456), stream=stream0)
del buf25
del primals_19
buf91 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2, c1_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf12, primals_10, buf91, 147456, grid=grid(147456), stream=stream0)
del buf12
del primals_10
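# buf85 is the module output (the conv_last result plus the residual input);
# the remaining returned buffers are, as far as we can tell, the inputs,
# normalized weights, intermediate activations, and the bool LeakyReLU masks
# (buf86-buf91) that the autograd backward graph consumes.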
return (buf85, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33, buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73, buf78, buf83, primals_1, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, primals_14, primals_15, primals_17, primals_18, primals_20, primals_21, primals_23, primals_24, primals_26, primals_27, primals_29, primals_30, primals_32, primals_33, primals_35, primals_36, primals_38, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, primals_50, primals_51, primals_53, primals_54, primals_56, primals_57, buf0, buf1, buf3, buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19, buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33, buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48, buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62, buf64, buf65, buf67, buf68, buf70, buf72, buf73, buf75, buf77, buf78, buf80, buf82, buf83, buf86, buf87, buf88, buf89, buf90, buf91, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 18, 64, 64), (73728, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((18, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, groups=3):
super(ConvBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=1, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
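# ConvBlock factors a convolution into grouped 1x1 -> depthwise 3x3 ->
# pointwise 1x1, all weight-normalized; ConvBlockD below is identical except
# its depthwise conv is dilated (dilation = ker_size, padding = ker_size).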
class ConvBlockD(nn.Module):
def __init__(self, in_channels, out_channels, groups=3, ker_size=2):
super(ConvBlockD, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=ker_size, dilation=ker_size, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class MIRB2(nn.Module):
def __init__(self, args):
super(MIRB2, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
res = x
c1_1 = self.lrelu(self.conv3_1(res))
c2_1 = self.lrelu(self.convd_1(res))
c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1)))
c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1)))
c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1)))
c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1)))
out = self.conv_last(torch.cat([c1_4, c2_4], 1))
out = out + x
return out
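# Minimal smoke test (illustrative; mirrors get_inputs/get_init_inputs below):
#   m = MIRB2(_mock_config(n_feats=18))
#   y = m(torch.rand(4, 18, 64, 64))
#   assert y.shape == (4, 18, 64, 64)  # the residual block preserves shape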
def get_inputs():
return [torch.rand([4, 18, 64, 64])]
def get_init_inputs():
return [[], {'args': _mock_config(n_feats=18)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tl.store(out_ptr0 + x0, tmp17, xmask)
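# Editor's note: the kernel above computes the per-output-channel L2 norm of
# the (18, 6, 1, 1) grouped-conv weight with the 6-element reduction fully
# unrolled into scalar loads; triton_poi_fused__weight_norm_interface_1 below
# then applies v * (g / norm) elementwise.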
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 6
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
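# leftover from mask elision: the all-true bounds mask above is computed but
# never used, since the launch grid covers the element count exactly; the
# same bare tl.full(...) expressions appear in later kernels in this file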
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
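# Editor's note: triton_poi_fused_convolution_2 is the per-channel bias add
# applied in place after each extern convolution call (the convolutions are
# launched with bias=None, so the bias is fused here instead).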
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 18
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 9 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 9
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 18
x0 = xindex % 4096
x2 = xindex // 73728
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 36864 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tl.full([1], 18, tl.int64)
tmp18 = tl.load(in_ptr2 + (x0 + 4096 * (-9 + x1) + 36864 * x2), tmp15, other=0.0)
tmp19 = tl.load(in_ptr3 + (-9 + x1), tmp15, eviction_policy='evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tmp20 > tmp8
tmp22 = tmp20 * tmp10
tmp23 = tl.where(tmp21, tmp20, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tl.store(out_ptr0 + x3, tmp26, None)
@triton.jit
def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 18
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 9
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57, primals_58
) = args
args.clear()
assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1))
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_4, (18,), (1,))
assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (18,), (1,))
assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_10, (9,), (1,))
assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_13, (18,), (1,))
assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_16, (18,), (1,))
assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_19, (9,), (1,))
assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_22, (18,), (1,))
assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_25, (18,), (1,))
assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_28, (9,), (1,))
assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_31, (18,), (1,))
assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_34, (18,), (1,))
assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_37, (9,), (1,))
assert_size_stride(primals_38, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_40, (18,), (1,))
assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_43, (18,), (1,))
assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_46, (9,), (1,))
assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_49, (18,), (1,))
assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_52, (18,), (1,))
assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_55, (9,), (1,))
assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_58, (18,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_3, buf0,
18, XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_3,
primals_2, buf0, buf1, 108, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(294912)](buf3, primals_4,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf4
buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf5, primals_6,
primals_5, buf6, 18, 9, XBLOCK=32, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_2[grid(294912)](buf8, primals_7,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf10, primals_9,
primals_8, buf11, 9, 18, XBLOCK=1, num_warps=2, num_stages=1)
buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_12,
buf13, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_12,
primals_11, buf13, buf14, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf16 = buf15
del buf15
triton_poi_fused_convolution_2[grid(294912)](buf16, primals_13,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf17 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf17
buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf18,
primals_15, primals_14, buf19, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_2[grid(294912)](buf21, primals_16,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_16
buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf22
buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf23,
primals_18, primals_17, buf24, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf12, primals_10, buf25,
primals_19, buf26, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_21,
buf27, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_21,
primals_20, buf27, buf28, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf30 = buf29
del buf29
triton_poi_fused_convolution_2[grid(294912)](buf30, primals_22,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_22
buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf31
buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf32,
primals_24, primals_23, buf33, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf35 = buf34
del buf34
triton_poi_fused_convolution_2[grid(294912)](buf35, primals_25,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_25
buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf36
buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf37,
primals_27, primals_26, buf38, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_30,
buf40, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_30,
primals_29, buf40, buf41, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf42 = extern_kernels.convolution(buf26, buf41, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf43 = buf42
del buf42
triton_poi_fused_convolution_2[grid(294912)](buf43, primals_31,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_31
buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf44
buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf45,
primals_33, primals_32, buf46, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf48 = buf47
del buf47
triton_poi_fused_convolution_2[grid(294912)](buf48, primals_34,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_34
buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf49
buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf50,
primals_36, primals_35, buf51, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf39, primals_28, buf52,
primals_37, buf53, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_39,
buf54, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_39,
primals_38, buf54, buf55, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf57 = buf56
del buf56
triton_poi_fused_convolution_2[grid(294912)](buf57, primals_40,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_40
buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf58
buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf59,
primals_42, primals_41, buf60, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf62 = buf61
del buf61
triton_poi_fused_convolution_2[grid(294912)](buf62, primals_43,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf63
buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf64,
primals_45, primals_44, buf65, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_48,
buf67, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_48,
primals_47, buf67, buf68, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf70 = buf69
del buf69
triton_poi_fused_convolution_2[grid(294912)](buf70, primals_49,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_49
buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf71
buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf72,
primals_51, primals_50, buf73, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf75 = buf74
del buf74
triton_poi_fused_convolution_2[grid(294912)](buf75, primals_52,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_52
buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf76
buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf77,
primals_54, primals_53, buf78, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf66, primals_46, buf79,
primals_55, buf80, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf81
buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_6[grid(18)](buf82,
primals_57, primals_56, buf83, 18, 18, XBLOCK=32, num_warps=8,
num_stages=1)
buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf85 = buf84
del buf84
triton_poi_fused_add_convolution_7[grid(294912)](buf85, primals_58,
primals_1, 294912, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_58
buf86 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf79, primals_55, buf86, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf79
del primals_55
buf87 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf66, primals_46, buf87, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf66
del primals_46
buf88 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf52, primals_37, buf88, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf52
del primals_37
buf89 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf39, primals_28, buf89, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf39
del primals_28
buf90 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf25, primals_19, buf90, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf25
del primals_19
buf91 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf12, primals_10, buf91, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf12
del primals_10
return (buf85, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33,
buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73,
buf78, buf83, primals_1, primals_2, primals_3, primals_5, primals_6,
primals_8, primals_9, primals_11, primals_12, primals_14,
primals_15, primals_17, primals_18, primals_20, primals_21,
primals_23, primals_24, primals_26, primals_27, primals_29,
primals_30, primals_32, primals_33, primals_35, primals_36,
primals_38, primals_39, primals_41, primals_42, primals_44,
primals_45, primals_47, primals_48, primals_50, primals_51,
primals_53, primals_54, primals_56, primals_57, buf0, buf1, buf3,
buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19,
buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33,
buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48,
buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62,
buf64, buf65, buf67, buf68, buf70, buf72, buf73, buf75, buf77,
buf78, buf80, buf82, buf83, buf86, buf87, buf88, buf89, buf90, buf91)
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, groups=3):
super(ConvBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=1, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
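# A hedged usage sketch (not from the original repo): ConvBlock factors a
# dense 3x3 convolution into grouped 1x1 -> depthwise 3x3 -> pointwise 1x1
# stages. The shapes below assume in_channels=18, out_channels=9, matching
# the traced buffers above.
def _demo_convblock_shapes():
    import torch
    block = ConvBlock(in_channels=18, out_channels=9, groups=3)
    x = torch.randn(4, 18, 64, 64)
    y = block(x)
    assert y.shape == (4, 9, 64, 64)
    return y.shape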
class ConvBlockD(nn.Module):
def __init__(self, in_channels, out_channels, groups=3, ker_size=2):
super(ConvBlockD, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=ker_size, dilation=ker_size, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
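# A hedged sketch of the dilated variant: ker_size sets the dilation (and
# padding) of the depthwise stage, not the kernel width. With kernel 3,
# dilation d and padding d, the spatial size is preserved:
# out = in + 2*d - d*(3 - 1) - 1 + 1 = in.
def _demo_convblockd_shapes():
    import torch
    block = ConvBlockD(in_channels=18, out_channels=9, groups=3, ker_size=2)
    x = torch.randn(4, 18, 64, 64)
    y = block(x)
    assert y.shape == (4, 9, 64, 64)
    return y.shape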
class MIRB2New(nn.Module):
def __init__(self, args):
super(MIRB2New, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, input_0):
primals_4 = self.conv3_1.group_conv.bias
primals_2 = self.conv3_1.group_conv.weight_g
primals_3 = self.conv3_1.group_conv.weight_v
primals_7 = self.conv3_1.depth_conv.bias
primals_5 = self.conv3_1.depth_conv.weight_g
primals_6 = self.conv3_1.depth_conv.weight_v
primals_10 = self.conv3_1.point_conv.bias
primals_8 = self.conv3_1.point_conv.weight_g
primals_9 = self.conv3_1.point_conv.weight_v
primals_13 = self.convd_1.group_conv.bias
primals_11 = self.convd_1.group_conv.weight_g
primals_12 = self.convd_1.group_conv.weight_v
primals_16 = self.convd_1.depth_conv.bias
primals_14 = self.convd_1.depth_conv.weight_g
primals_15 = self.convd_1.depth_conv.weight_v
primals_19 = self.convd_1.point_conv.bias
primals_17 = self.convd_1.point_conv.weight_g
primals_18 = self.convd_1.point_conv.weight_v
primals_22 = self.conv3_2.group_conv.bias
primals_20 = self.conv3_2.group_conv.weight_g
primals_21 = self.conv3_2.group_conv.weight_v
primals_25 = self.conv3_2.depth_conv.bias
primals_23 = self.conv3_2.depth_conv.weight_g
primals_24 = self.conv3_2.depth_conv.weight_v
primals_28 = self.conv3_2.point_conv.bias
primals_26 = self.conv3_2.point_conv.weight_g
primals_27 = self.conv3_2.point_conv.weight_v
primals_31 = self.convd_2.group_conv.bias
primals_29 = self.convd_2.group_conv.weight_g
primals_30 = self.convd_2.group_conv.weight_v
primals_34 = self.convd_2.depth_conv.bias
primals_32 = self.convd_2.depth_conv.weight_g
primals_33 = self.convd_2.depth_conv.weight_v
primals_37 = self.convd_2.point_conv.bias
primals_35 = self.convd_2.point_conv.weight_g
primals_36 = self.convd_2.point_conv.weight_v
primals_40 = self.conv3_3.group_conv.bias
primals_38 = self.conv3_3.group_conv.weight_g
primals_39 = self.conv3_3.group_conv.weight_v
primals_43 = self.conv3_3.depth_conv.bias
primals_41 = self.conv3_3.depth_conv.weight_g
primals_42 = self.conv3_3.depth_conv.weight_v
primals_46 = self.conv3_3.point_conv.bias
primals_44 = self.conv3_3.point_conv.weight_g
primals_45 = self.conv3_3.point_conv.weight_v
primals_49 = self.convd_3.group_conv.bias
primals_47 = self.convd_3.group_conv.weight_g
primals_48 = self.convd_3.group_conv.weight_v
primals_52 = self.convd_3.depth_conv.bias
primals_50 = self.convd_3.depth_conv.weight_g
primals_51 = self.convd_3.depth_conv.weight_v
primals_55 = self.convd_3.point_conv.bias
primals_53 = self.convd_3.point_conv.weight_g
primals_54 = self.convd_3.point_conv.weight_v
primals_58 = self.conv_last.bias
primals_56 = self.conv_last.weight_g
primals_57 = self.conv_last.weight_v
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58])
return output[0]
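# A hedged usage sketch. The `args` namespace is an assumption; the traced
# shapes above imply args.n_feats=18 (so c_out=9), and the compiled call()
# path requires a CUDA device.
def _demo_mirb2_forward():
    import types
    import torch
    args = types.SimpleNamespace(n_feats=18)
    block = MIRB2New(args).cuda()
    x = torch.randn(4, 18, 64, 64, device='cuda')
    out = block(x)
    assert out.shape == x.shape  # residual block: output matches input shape
    return out.shape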
# repo_name: wwjfsfs/wwjyyds
# module_name: MIRB2
# synthetic: false
# uuid: 13209
# licenses: ["MIT"]
# stars: 0
# sha: 80cd6267fde7cd98838078a0d5178a557ceb7414
# repo_link: https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414

# entry_point: MIRB1
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/34/c34lnuw3wok4euydis4jz2cumdv5zl53hr2km2mr6sokjcomm2j6.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused__weight_norm_interface_0 = async_compile.triton('triton_poi_fused__weight_norm_interface_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (6*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (6*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (6*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (6*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4 + (6*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (5 + (6*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tl.store(out_ptr0 + (x0), tmp17, xmask)
''', device_str='cuda')
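# Reference sketch (an equivalence check, not part of the generated module):
# the kernel above computes the L2 norm of each (6, 1, 1) filter of
# weight_v, i.e. pow_2 in the graph fragment.
def _reference_weight_norm_g(v):
    # v: (18, 6, 1, 1) -> per-filter norms of shape (18, 1, 1, 1)
    return v.pow(2).sum(dim=(1, 2, 3), keepdim=True).sqrt()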
# kernel path: runs/run_shard_9/inductor_cache/4h/c4hyivh4rlcg5ap7pj7xbbqscs2u2jjb4g5w4jaidlc7wpyz7lbi.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
triton_poi_fused__weight_norm_interface_1 = async_compile.triton('triton_poi_fused__weight_norm_interface_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 6)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
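# Reference sketch: combined with the norm kernel above, this reproduces
# torch.nn.utils.weight_norm's reparameterization w = v * (g / ||v||),
# matching the div/mul nodes in the graph fragment.
def _reference_weight_norm_w(v, g):
    norm = v.pow(2).sum(dim=(1, 2, 3), keepdim=True).sqrt()
    return v * (g / norm)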
# kernel path: runs/run_shard_9/inductor_cache/ij/cijtnciymslu5jmlqs4skpm4lp2nibv5hh3th6b5i2szhhqwwhmh.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %mul, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 3), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 18
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
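# Reference sketch: the external convolution is launched with bias=None, so
# this kernel only folds the bias in afterwards, per channel, in place.
def _reference_bias_add(x, bias):
    # x: (N, 18, H, W), bias: (18,)
    return x + bias.view(1, -1, 1, 1)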
# kernel path: runs/run_shard_9/inductor_cache/mz/cmzalccs4mduopsp2yd4zky3wwaeepvbubqparxslix3sntpfkub.py
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_1 => div_1, mul_1, pow_3, pow_4, sum_2
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_6, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1, 2, 3], True), kwargs = {})
# %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_5, %pow_4), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %div_1), kwargs = {})
triton_per_fused__weight_norm_interface_3 = async_compile.triton('triton_per_fused__weight_norm_interface_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 18
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (9*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (9*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/dy/cdylggyn2ws3sfvdukozllsfp2dvd7jch6eitsmkngzuzggssgxt.py
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_2 => div_2, mul_2, pow_5, pow_6, sum_3
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_9, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1, 2, 3], True), kwargs = {})
# %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_8, %pow_6), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, %div_2), kwargs = {})
triton_per_fused__weight_norm_interface_4 = async_compile.triton('triton_per_fused__weight_norm_interface_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 9
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (18*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (18*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4r/c4rv2lnvemalwfmwemwkkjuqab3eovephhf3dkwsqaqrzbzurn6e.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 18
x0 = xindex % 4096
x2 = (xindex // 73728)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (36864*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 18, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tl.load(in_ptr2 + (x0 + (4096*((-9) + x1)) + (36864*x2)), tmp15, other=0.0)
tmp19 = tl.load(in_ptr3 + ((-9) + x1), tmp15, eviction_policy='evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tmp20 > tmp8
tmp22 = tmp20 * tmp10
tmp23 = tl.where(tmp21, tmp20, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tl.store(out_ptr0 + (x3), tmp26, None)
''', device_str='cuda')
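# Reference sketch: the fused kernel applies bias-add and LeakyReLU(0.2) to
# both 9-channel branch outputs, then concatenates them along the channel
# dimension (the `where` nodes feeding `cat` in the graph fragment).
def _reference_cat_lrelu(a, bias_a, b, bias_b):
    import torch
    import torch.nn.functional as F
    a = F.leaky_relu(a + bias_a.view(1, -1, 1, 1), negative_slope=0.2)
    b = F.leaky_relu(b + bias_b.view(1, -1, 1, 1), negative_slope=0.2)
    return torch.cat([a, b], dim=1)  # two (N, 9, H, W) -> (N, 18, H, W)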
# kernel path: runs/run_shard_9/inductor_cache/nt/cntzp5nakwvul7tcparxahaerbizrgojsic6at2feukyiwjvutbv.py
# Topologically Sorted Source Nodes: [_weight_norm_18], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_18 => div_18, mul_24, pow_37, pow_38, sum_19
# Graph fragment:
# %pow_37 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_57, 2), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_37, [1, 2, 3], True), kwargs = {})
# %pow_38 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_19, 0.5), kwargs = {})
# %div_18 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_56, %pow_38), kwargs = {})
# %mul_24 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_57, %div_18), kwargs = {})
triton_per_fused__weight_norm_interface_6 = async_compile.triton('triton_per_fused__weight_norm_interface_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 18
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (18*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (18*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/yr/cyrzws2lnjde2as7gzejnwjcj4lymm7bgicdqzp4zy6p5j7qtkpv.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out => convolution_18
# out_1 => add
# Graph fragment:
# %convolution_18 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_4, %mul_24, %primals_58, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_18, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_7 = async_compile.triton('triton_poi_fused_add_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 18
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
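# Reference sketch: bias-add on the final 1x1 convolution fused with the
# residual connection back to the block input (out_1 = conv + primals_1 in
# the graph fragment above).
def _reference_bias_residual(conv_out, bias, residual):
    return conv_out + bias.view(1, -1, 1, 1) + residual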
# kernel path: runs/run_shard_9/inductor_cache/al/callo7bieu2ffgjrumilpfnh66yj2nuuap5g7jnjsb3x7g3p5ot7.py
# Topologically Sorted Source Nodes: [x_17, c2_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# c2_4 => gt_5, mul_23, where_5
# x_17 => convolution_17
# Graph fragment:
# %convolution_17 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_16, %mul_22, %primals_55, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_17, 0), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_17, 0.2), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_17, %mul_23), kwargs = {})
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_5, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 9
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
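# Reference sketch: instead of keeping the full activation alive for
# autograd, this kernel materializes only the boolean mask
# (leaky_relu(x + bias) > 0) that LeakyReLU's backward pass needs.
def _reference_lrelu_backward_mask(x, bias):
    import torch.nn.functional as F
    y = F.leaky_relu(x + bias.view(1, -1, 1, 1), negative_slope=0.2)
    return y > 0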
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58 = args
args.clear()
assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1))
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_4, (18, ), (1, ))
assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (18, ), (1, ))
assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_10, (9, ), (1, ))
assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_13, (18, ), (1, ))
assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_16, (18, ), (1, ))
assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_19, (9, ), (1, ))
assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_22, (18, ), (1, ))
assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_25, (18, ), (1, ))
assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_28, (9, ), (1, ))
assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_31, (18, ), (1, ))
assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_34, (18, ), (1, ))
assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_37, (9, ), (1, ))
assert_size_stride(primals_38, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_40, (18, ), (1, ))
assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_43, (18, ), (1, ))
assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_46, (9, ), (1, ))
assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_49, (18, ), (1, ))
assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_52, (18, ), (1, ))
assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_55, (9, ), (1, ))
assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_58, (18, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
stream0 = get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0.run(primals_3, buf0, 18, grid=grid(18), stream=stream0)
buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_3, primals_2, buf0, buf1, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf3, primals_4, 294912, grid=grid(294912), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf4 # reuse
buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf5, primals_6, primals_5, buf6, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf8, primals_7, 294912, grid=grid(294912), stream=stream0)
del primals_7
buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf9 # reuse
buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf10, primals_9, primals_8, buf11, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_12, buf13, 18, grid=grid(18), stream=stream0)
buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_12, primals_11, buf13, buf14, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf16, primals_13, 294912, grid=grid(294912), stream=stream0)
del primals_13
buf17 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf17 # reuse
buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_4], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf18, primals_15, primals_14, buf19, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf21, primals_16, 294912, grid=grid(294912), stream=stream0)
del primals_16
buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf22 # reuse
buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_5], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf23, primals_18, primals_17, buf24, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf12, primals_10, buf25, primals_19, buf26, 294912, grid=grid(294912), stream=stream0)
buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_6], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_21, buf27, 18, grid=grid(18), stream=stream0)
buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_6], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_21, primals_20, buf27, buf28, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf30 = buf29; del buf29 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf30, primals_22, 294912, grid=grid(294912), stream=stream0)
del primals_22
buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf31 # reuse
buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_7], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf32, primals_24, primals_23, buf33, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf35, primals_25, 294912, grid=grid(294912), stream=stream0)
del primals_25
buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf36 # reuse
buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_8], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf37, primals_27, primals_26, buf38, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_9], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_30, buf40, 18, grid=grid(18), stream=stream0)
buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_9], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_30, primals_29, buf40, buf41, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf26, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf43, primals_31, 294912, grid=grid(294912), stream=stream0)
del primals_31
buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf44 # reuse
buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_10], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf45, primals_33, primals_32, buf46, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf48 = buf47; del buf47 # reuse
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf48, primals_34, 294912, grid=grid(294912), stream=stream0)
del primals_34
buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf49 # reuse
buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_11], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf50, primals_36, primals_35, buf51, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf39, primals_28, buf52, primals_37, buf53, 294912, grid=grid(294912), stream=stream0)
buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_12], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_39, buf54, 18, grid=grid(18), stream=stream0)
buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_12], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_39, primals_38, buf54, buf55, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf57 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf57, primals_40, 294912, grid=grid(294912), stream=stream0)
del primals_40
buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf58 # reuse
buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_13], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf59, primals_42, primals_41, buf60, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf62 = buf61; del buf61 # reuse
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf62, primals_43, 294912, grid=grid(294912), stream=stream0)
del primals_43
buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf63 # reuse
buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_14], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf64, primals_45, primals_44, buf65, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_15], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_48, buf67, 18, grid=grid(18), stream=stream0)
buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_15], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_48, primals_47, buf67, buf68, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf70 = buf69; del buf69 # reuse
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf70, primals_49, 294912, grid=grid(294912), stream=stream0)
del primals_49
buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf71 # reuse
buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_16], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf72, primals_51, primals_50, buf73, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf75 = buf74; del buf74 # reuse
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf75, primals_52, 294912, grid=grid(294912), stream=stream0)
del primals_52
buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf76 # reuse
buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_17], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf77, primals_54, primals_53, buf78, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution]
buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf66, primals_46, buf79, primals_55, buf80, 294912, grid=grid(294912), stream=stream0)
buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf81 # reuse
buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_18], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_6.run(buf82, primals_57, primals_56, buf83, 18, 18, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf85 = buf84; del buf84 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_7.run(buf85, primals_58, primals_1, 294912, grid=grid(294912), stream=stream0)
del primals_58
buf86 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_17, c2_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf79, primals_55, buf86, 147456, grid=grid(147456), stream=stream0)
del buf79
del primals_55
buf87 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_14, c1_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf66, primals_46, buf87, 147456, grid=grid(147456), stream=stream0)
del buf66
del primals_46
buf88 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_11, c2_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf52, primals_37, buf88, 147456, grid=grid(147456), stream=stream0)
del buf52
del primals_37
buf89 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_8, c1_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf39, primals_28, buf89, 147456, grid=grid(147456), stream=stream0)
del buf39
del primals_28
buf90 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5, c2_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf25, primals_19, buf90, 147456, grid=grid(147456), stream=stream0)
del buf25
del primals_19
buf91 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2, c1_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8.run(buf12, primals_10, buf91, 147456, grid=grid(147456), stream=stream0)
del buf12
del primals_10
return (buf85, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33, buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73, buf78, buf83, primals_1, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, primals_14, primals_15, primals_17, primals_18, primals_20, primals_21, primals_23, primals_24, primals_26, primals_27, primals_29, primals_30, primals_32, primals_33, primals_35, primals_36, primals_38, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, primals_50, primals_51, primals_53, primals_54, primals_56, primals_57, buf0, buf1, buf3, buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19, buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33, buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48, buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62, buf64, buf65, buf67, buf68, buf70, buf72, buf73, buf75, buf77, buf78, buf80, buf82, buf83, buf86, buf87, buf88, buf89, buf90, buf91, )
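# The first element of the tuple above (buf85) is the module output; the rest
# are the normalized weights, intermediate activations, and the LeakyReLU sign
# masks (buf86-buf91) that are kept alive for the autograd backward graph.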
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 18, 64, 64), (73728, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((18, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, groups=3):
super(ConvBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=1, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class MIRB1(nn.Module):
def __init__(self, args):
super(MIRB1, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlock(args.n_feats, self.c_out)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlock(args.n_feats, self.c_out)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlock(args.n_feats, self.c_out)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
res = x
c1_1 = self.lrelu(self.conv3_1(res))
c2_1 = self.lrelu(self.convd_1(res))
c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1)))
c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1)))
c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1)))
c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1)))
out = self.conv_last(torch.cat([c1_4, c2_4], 1))
out = out + x
return out
def get_inputs():
return [torch.rand([4, 18, 64, 64])]
def get_init_inputs():
return [[], {'args': _mock_config(n_feats=18)}]
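# Minimal smoke-test sketch: `_smoke_test` is a convenience helper added for
# illustration (it is not part of the paritybench harness). It wires
# get_init_inputs()/get_inputs() into MIRB1 and checks that the residual block
# preserves the input shape.
def _smoke_test():
    init_args, init_kwargs = get_init_inputs()
    model = MIRB1(*init_args, **init_kwargs)
    x, = get_inputs()
    out = model(x)
    assert out.shape == x.shape  # residual add keeps (4, 18, 64, 64)
    return out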
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
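# Per-filter L2 norm for the 1x1 group-conv weight_v (shape (18, 6, 1, 1)):
# each of the 18 filters sees 6 input channels (18 in-channels over 3 groups),
# so the 6-element reduction sqrt(sum v^2) is fully unrolled below.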
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (4 + 6 * x0), xmask,
        eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (5 + 6 * x0), xmask,
        eviction_policy='evict_last')

tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tl.store(out_ptr0 + x0, tmp17, xmask)
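# Second half of weight norm for the same weights: w = v * (g / ||v||), with
# in_ptr0 = weight_v, in_ptr1 = weight_g, in_ptr2 = the norms computed above;
# x1 = xindex // 6 selects the filter.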
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 6
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
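# In-place bias add after the extern 1x1 convolution (which is launched with
# bias=None); x1 = xindex // 4096 % 18 is the channel index for a 64x64 map.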
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
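    # xnumel (294912) is known to be divisible by XBLOCK here, so the bare
    # tl.full(...) below is the constant all-true mask and loads/stores run
    # without a bounds check.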
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
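# Weight norm as a row reduction for the depthwise 3x3 weights (18 filters x 9
# taps): squares and sums each row, writes the norm back via in_out_ptr0, and
# emits the rescaled weight v * g / ||v||. Kernels _4 and _6 below repeat this
# pattern with rnumel=18 for the point-conv and conv_last weights.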
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 18
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 9 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 9
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)
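# Fuses the two point-conv epilogues with the channel concat: channels 0-8 take
# in_ptr0 plus bias in_ptr1, channels 9-17 take in_ptr2 plus bias in_ptr3, each
# passed through LeakyReLU(0.2) -- the eager `lrelu(...)` followed by
# `torch.cat([c1, c2], 1)`. The bare tl.full(...) expressions below appear to
# be leftovers of unused bound temporaries stripped by the source normalizer.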
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 18
x0 = xindex % 4096
x2 = xindex // 73728
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 36864 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tl.full([1], 18, tl.int64)
tmp18 = tl.load(in_ptr2 + (x0 + 4096 * (-9 + x1) + 36864 * x2), tmp15,
other=0.0)
    tmp19 = tl.load(in_ptr3 + (-9 + x1), tmp15,
        eviction_policy='evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tmp20 > tmp8
tmp22 = tmp20 * tmp10
tmp23 = tl.where(tmp21, tmp20, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tl.store(out_ptr0 + x3, tmp26, None)
@triton.jit
def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 18
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)
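# Epilogue of conv_last fused with the residual connection: adds the
# per-channel bias (in_ptr0) and the block input (in_ptr1) elementwise,
# i.e. out = conv + bias + x.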
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, None)
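# Recomputes bias-add + LeakyReLU(0.2) but stores only the boolean mask
# (activation > 0) that aten.leaky_relu_backward needs to route gradients.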
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 9
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57, primals_58
) = args
args.clear()
assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1))
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_4, (18,), (1,))
assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (18,), (1,))
assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_10, (9,), (1,))
assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_13, (18,), (1,))
assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_16, (18,), (1,))
assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_19, (9,), (1,))
assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_22, (18,), (1,))
assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_25, (18,), (1,))
assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_28, (9,), (1,))
assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_31, (18,), (1,))
assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_34, (18,), (1,))
assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_37, (9,), (1,))
assert_size_stride(primals_38, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_40, (18,), (1,))
assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_43, (18,), (1,))
assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_46, (9,), (1,))
assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_49, (18,), (1,))
assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_52, (18,), (1,))
assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_55, (9,), (1,))
assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_58, (18,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_3, buf0,
18, XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_3,
primals_2, buf0, buf1, 108, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(294912)](buf3, primals_4,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_4
        buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18),
            torch.float32)
buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf4
buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf5, primals_6,
primals_5, buf6, 18, 9, XBLOCK=32, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_2[grid(294912)](buf8, primals_7,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf10, primals_9,
primals_8, buf11, 9, 18, XBLOCK=1, num_warps=2, num_stages=1)
buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_12,
buf13, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_12,
primals_11, buf13, buf14, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf16 = buf15
del buf15
triton_poi_fused_convolution_2[grid(294912)](buf16, primals_13,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
        buf17 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18),
            torch.float32)
buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf17
buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf18,
primals_15, primals_14, buf19, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_2[grid(294912)](buf21, primals_16,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_16
buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf22
buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf23,
primals_18, primals_17, buf24, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf12, primals_10, buf25,
primals_19, buf26, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_21,
buf27, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_21,
primals_20, buf27, buf28, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf30 = buf29
del buf29
triton_poi_fused_convolution_2[grid(294912)](buf30, primals_22,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_22
        buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18),
            torch.float32)
buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf31
buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf32,
primals_24, primals_23, buf33, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf35 = buf34
del buf34
triton_poi_fused_convolution_2[grid(294912)](buf35, primals_25,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_25
buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf36
buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf37,
primals_27, primals_26, buf38, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_30,
buf40, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_30,
primals_29, buf40, buf41, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf42 = extern_kernels.convolution(buf26, buf41, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf43 = buf42
del buf42
triton_poi_fused_convolution_2[grid(294912)](buf43, primals_31,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_31
        buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18),
            torch.float32)
buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf44
buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf45,
primals_33, primals_32, buf46, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf48 = buf47
del buf47
triton_poi_fused_convolution_2[grid(294912)](buf48, primals_34,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_34
buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf49
buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf50,
primals_36, primals_35, buf51, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf39, primals_28, buf52,
primals_37, buf53, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_39,
buf54, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_39,
primals_38, buf54, buf55, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf57 = buf56
del buf56
triton_poi_fused_convolution_2[grid(294912)](buf57, primals_40,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_40
        buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18),
            torch.float32)
buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf58
buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf59,
primals_42, primals_41, buf60, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf62 = buf61
del buf61
triton_poi_fused_convolution_2[grid(294912)](buf62, primals_43,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf63
buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf64,
primals_45, primals_44, buf65, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_48,
buf67, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_48,
primals_47, buf67, buf68, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf70 = buf69
del buf69
triton_poi_fused_convolution_2[grid(294912)](buf70, primals_49,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_49
        buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18),
            torch.float32)
buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf71
buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf72,
primals_51, primals_50, buf73, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf75 = buf74
del buf74
triton_poi_fused_convolution_2[grid(294912)](buf75, primals_52,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_52
buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf76
buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf77,
primals_54, primals_53, buf78, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf66, primals_46, buf79,
primals_55, buf80, 294912, XBLOCK=512, num_warps=8, num_stages=1)
        buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18),
            torch.float32)
buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf81
        buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1),
            torch.float32)
triton_per_fused__weight_norm_interface_6[grid(18)](buf82,
primals_57, primals_56, buf83, 18, 18, XBLOCK=32, num_warps=8,
num_stages=1)
buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf85 = buf84
del buf84
triton_poi_fused_add_convolution_7[grid(294912)](buf85, primals_58,
primals_1, 294912, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_58
buf86 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf79, primals_55, buf86, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf79
del primals_55
buf87 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf66, primals_46, buf87, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf66
del primals_46
buf88 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf52, primals_37, buf88, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf52
del primals_37
buf89 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf39, primals_28, buf89, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf39
del primals_28
buf90 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf25, primals_19, buf90, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf25
del primals_19
buf91 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_8[grid(
147456)](buf12, primals_10, buf91, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf12
del primals_10
return (buf85, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33,
buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73,
buf78, buf83, primals_1, primals_2, primals_3, primals_5, primals_6,
primals_8, primals_9, primals_11, primals_12, primals_14,
primals_15, primals_17, primals_18, primals_20, primals_21,
primals_23, primals_24, primals_26, primals_27, primals_29,
primals_30, primals_32, primals_33, primals_35, primals_36,
primals_38, primals_39, primals_41, primals_42, primals_44,
primals_45, primals_47, primals_48, primals_50, primals_51,
primals_53, primals_54, primals_56, primals_57, buf0, buf1, buf3,
buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19,
buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33,
buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48,
buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62,
buf64, buf65, buf67, buf68, buf70, buf72, buf73, buf75, buf77,
buf78, buf80, buf82, buf83, buf86, buf87, buf88, buf89, buf90, buf91)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, groups=3):
super(ConvBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=1, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class MIRB1New(nn.Module):
def __init__(self, args):
super(MIRB1New, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlock(args.n_feats, self.c_out)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlock(args.n_feats, self.c_out)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlock(args.n_feats, self.c_out)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, input_0):
primals_4 = self.conv3_1.group_conv.bias
primals_2 = self.conv3_1.group_conv.weight_g
primals_3 = self.conv3_1.group_conv.weight_v
primals_7 = self.conv3_1.depth_conv.bias
primals_5 = self.conv3_1.depth_conv.weight_g
primals_6 = self.conv3_1.depth_conv.weight_v
primals_10 = self.conv3_1.point_conv.bias
primals_8 = self.conv3_1.point_conv.weight_g
primals_9 = self.conv3_1.point_conv.weight_v
primals_13 = self.convd_1.group_conv.bias
primals_11 = self.convd_1.group_conv.weight_g
primals_12 = self.convd_1.group_conv.weight_v
primals_16 = self.convd_1.depth_conv.bias
primals_14 = self.convd_1.depth_conv.weight_g
primals_15 = self.convd_1.depth_conv.weight_v
primals_19 = self.convd_1.point_conv.bias
primals_17 = self.convd_1.point_conv.weight_g
primals_18 = self.convd_1.point_conv.weight_v
primals_22 = self.conv3_2.group_conv.bias
primals_20 = self.conv3_2.group_conv.weight_g
primals_21 = self.conv3_2.group_conv.weight_v
primals_25 = self.conv3_2.depth_conv.bias
primals_23 = self.conv3_2.depth_conv.weight_g
primals_24 = self.conv3_2.depth_conv.weight_v
primals_28 = self.conv3_2.point_conv.bias
primals_26 = self.conv3_2.point_conv.weight_g
primals_27 = self.conv3_2.point_conv.weight_v
primals_31 = self.convd_2.group_conv.bias
primals_29 = self.convd_2.group_conv.weight_g
primals_30 = self.convd_2.group_conv.weight_v
primals_34 = self.convd_2.depth_conv.bias
primals_32 = self.convd_2.depth_conv.weight_g
primals_33 = self.convd_2.depth_conv.weight_v
primals_37 = self.convd_2.point_conv.bias
primals_35 = self.convd_2.point_conv.weight_g
primals_36 = self.convd_2.point_conv.weight_v
primals_40 = self.conv3_3.group_conv.bias
primals_38 = self.conv3_3.group_conv.weight_g
primals_39 = self.conv3_3.group_conv.weight_v
primals_43 = self.conv3_3.depth_conv.bias
primals_41 = self.conv3_3.depth_conv.weight_g
primals_42 = self.conv3_3.depth_conv.weight_v
primals_46 = self.conv3_3.point_conv.bias
primals_44 = self.conv3_3.point_conv.weight_g
primals_45 = self.conv3_3.point_conv.weight_v
primals_49 = self.convd_3.group_conv.bias
primals_47 = self.convd_3.group_conv.weight_g
primals_48 = self.convd_3.group_conv.weight_v
primals_52 = self.convd_3.depth_conv.bias
primals_50 = self.convd_3.depth_conv.weight_g
primals_51 = self.convd_3.depth_conv.weight_v
primals_55 = self.convd_3.point_conv.bias
primals_53 = self.convd_3.point_conv.weight_g
primals_54 = self.convd_3.point_conv.weight_v
primals_58 = self.conv_last.bias
primals_56 = self.conv_last.weight_g
primals_57 = self.conv_last.weight_v
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58])
return output[0]
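# Usage sketch (assumes a CUDA device, since `call` launches Triton kernels,
# and the `_mock_config` helper shown earlier in this file):
#   model = MIRB1New(_mock_config(n_feats=18)).cuda()
#   y = model(torch.rand(4, 18, 64, 64, device='cuda'))  # shape (4, 18, 64, 64)
# With identical weights this should match the eager MIRB1 forward above.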
| wwjfsfs/wwjyyds | MIRB1 | false | 13,210 | [ "MIT" ] | 0 | 80cd6267fde7cd98838078a0d5178a557ceb7414 | https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414 |
BertLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
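# Note: this clone kernel fuses the nn.Linear bias add with the head-split
# transpose (bs, seq, heads, head_dim) -> (bs, heads, seq, head_dim); with the
# test config (hidden_size=4, num_attention_heads=4) head_dim is 1.
# Rough eager-mode sketch (illustrative only; `proj` and `bias` are stand-in names):
#   (proj + bias).view(4, 4, 4, 1).transpose(1, 2).contiguous()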
# kernel path: runs/run_shard_9/inductor_cache/qd/cqdo5wv3kploo6uotug3zsqyson32sal5bj7kasnk2vzliy3j5h6.py
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# eq => eq
# Graph fragment:
# %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_8, -10000.0), kwargs = {})
triton_poi_fused_eq_1 = async_compile.triton('triton_poi_fused_eq_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -10000.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
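# Elementwise boolean mask reproducing `attention_mask == -10000.0` from
# BertSelfAttention.attention; both softmax passes below reuse this buffer.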
# kernel path: runs/run_shard_9/inductor_cache/ce/ccew3aarqwghirerz43ladwwc3gll4wqkuac6bilja56jcp4uqcm.py
# Topologically Sorted Source Nodes: [att_scores, masked_fill_, att_scores_1], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# att_scores => div
# att_scores_1 => amax, exp, sub, sum_1
# masked_fill_ => full_default, where
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -10000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -10000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + (x2), tmp20, xmask)
tl.store(out_ptr1 + (x2), tmp31, xmask)
''', device_str='cuda')
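# First of two softmax passes: recomputes the scaled and masked scores for each
# length-4 key row (the 1/sqrt(head_dim) scale is 1.0 here, so the multiply is
# a no-op) and writes the row max (out_ptr0) and the sum of exp(score - max)
# (out_ptr1) for a numerically stable softmax. The (4, 4, 4) mask is indexed
# with x0 = xindex % 16, i.e. broadcast over the batch dimension.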
# kernel path: runs/run_shard_9/inductor_cache/ur/cur63ty6ioejq5ketok6ig4an25ma7edi64jshpb6indrifcsprf.py
# Topologically Sorted Source Nodes: [att_scores, masked_fill_, att_scores_1], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# att_scores => div
# att_scores_1 => amax, div_1, exp, sub
# masked_fill_ => full_default, where
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -10000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = xindex
x5 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x4), xmask)
tmp6 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -10000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + (x4), tmp10, xmask)
''', device_str='cuda')
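# Second softmax pass: normalizes each score in place as
# exp(score - row_max) / row_sum. The fill value is -10000.0 rather than -inf,
# mirroring masked_fill_ in the source module, so masked positions keep a tiny
# nonzero probability.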
# kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
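# Bias-free clone implementing the `.transpose(1, 2).contiguous()` that
# rearranges the attention output back to (bs, seq, heads, head_dim) before it
# is flattened to (bs, seq, all_head_size).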
# kernel path: runs/run_shard_9/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py
# Topologically Sorted Source Nodes: [add, hidden_states], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# hidden_states => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_17), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
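# Stage 1 of the fused residual-add + LayerNorm: forms the residual sum
# (hidden_states + attention projection) over the hidden dim of size 4 and
# writes its mean (out_ptr0) and population variance, correction=0 (out_ptr1).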
# kernel path: runs/run_shard_9/inductor_cache/l3/cl3bnd5pv2p4ydfmlj74bv4mbiwr2ntrdvbubnjubetyhosmxag6.py
# Topologically Sorted Source Nodes: [add, hidden_states], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# hidden_states => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_17), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_11), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
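# Stage 2 of the fused residual-add + LayerNorm:
# (x - mean) * rsqrt(var + eps) * gamma + beta. The literal 1.0 added before
# rsqrt is the eps; the test config below sets layer_norm_eps=1.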
# kernel path: runs/run_shard_9/inductor_cache/u2/cu2fig7atthxt4e4ppqi7fkzy5tvgusm4oiytjrwoaaw43vzqnis.py
# Topologically Sorted Source Nodes: [interim_hidden_states], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# interim_hidden_states => add_3, erf, mul_2, mul_3, mul_4
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_3,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_3), kwargs = {})
triton_poi_fused_gelu_7 = async_compile.triton('triton_poi_fused_gelu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
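# Exact (erf-based) GELU, 0.5 * x * (1 + erf(x / sqrt(2))); the constant
# 0.7071067811865476 is 1/sqrt(2). This matches F.gelu's default
# approximate='none' path used by self.interm_af.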
# kernel path: runs/run_shard_9/inductor_cache/he/chevf4d6tadiz3y2a2abr2lj2bvo3wyfykoivwj2s4xedp3vdjuf.py
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_1 => add_4
# Graph fragment:
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_21), kwargs = {})
triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
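# Fuses the out_dense bias add with the second residual connection:
# in_out_ptr0 <- residual + (matmul_output + bias).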
# kernel path: runs/run_shard_9/inductor_cache/gz/cgzqe5dke5rie2wa5xpxqomksfdsrkusulax72rkvntne4p5jvyf.py
# Topologically Sorted Source Nodes: [hidden_states_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# hidden_states_1 => add_5, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_4, [2]), kwargs = {correction: 0, keepdim: True})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1.0), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {})
triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1.0
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
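# LayerNorm statistics for the final norm. Unlike the stage-1 kernel above,
# this one also folds in the eps add and rsqrt, storing mean and
# rsqrt(var + 1.0) so the second pass is a pure multiply-add.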
# kernel path: runs/run_shard_9/inductor_cache/al/cal3txxjlyumb2wxf6pzsp7g5yvv5ygiluv6ygjjzldvb2woph4t.py
# Topologically Sorted Source Nodes: [hidden_states_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# hidden_states_1 => add_5, add_6, mul_5, mul_6, rsqrt_1, sub_2, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_4, [2]), kwargs = {correction: 0, keepdim: True})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1.0), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %getitem_3), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_17), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_18), kwargs = {})
triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
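# Final normalize-and-affine pass: (x - mean) * precomputed_rsqrt * gamma + beta.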
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf2, primals_7, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf0, primals_3, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
triton_poi_fused_eq_1.run(primals_8, buf6, 64, grid=grid(64), stream=stream0)
del primals_8
buf7 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [att_scores, masked_fill_, att_scores_1], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_2.run(buf6, buf5, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [att_scores, masked_fill_, att_scores_1], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_3.run(buf9, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [attn_value], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attn_value], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, hidden_states], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf13, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, hidden_states], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, grid=grid(64), stream=stream0)
del primals_12
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf17)
del primals_14
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [interim_hidden_states], Original ATen: [aten.gelu]
triton_poi_fused_gelu_7.run(buf17, buf18, 64, grid=grid(64), stream=stream0)
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
triton_poi_fused_add_8.run(buf20, buf16, primals_16, 64, grid=grid(64), stream=stream0)
del primals_16
buf21 = buf15; del buf15 # reuse
buf22 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [hidden_states_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf20, buf21, buf22, 16, grid=grid(16), stream=stream0)
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_10.run(buf20, buf21, buf22, primals_17, primals_18, buf23, 64, grid=grid(64), stream=stream0)
del buf21
del buf22
del primals_18
return (buf23, primals_1, primals_11, primals_17, buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf17, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf20, primals_15, primals_13, primals_9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
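# call() returns the layer output (buf23) first; the remaining tensors are the
# activations and reinterpreted weights that inductor saves for the backward
# pass of this '0_forward' graph.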
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transform(self, x, linear_layer):
bs, seq_len = x.shape[:2]
proj = linear_layer(x)
        proj = proj.view(bs, seq_len, self.num_attention_heads, self.attention_head_size)
proj = proj.transpose(1, 2)
return proj
def attention(self, key, query, value, attention_mask):
        att_scores = query @ key.transpose(-2, -1) / math.sqrt(self.attention_head_size)
att_scores.masked_fill_(attention_mask == -10000.0, value=-10000.0)
att_scores = F.softmax(att_scores, dim=-1)
att_scores = self.dropout(att_scores)
return att_scores @ value
def forward(self, hidden_states, attention_mask):
key_layer = self.transform(hidden_states, self.key)
value_layer = self.transform(hidden_states, self.value)
query_layer = self.transform(hidden_states, self.query)
attn_value = self.attention(key_layer, query_layer, value_layer,
attention_mask)
return attn_value
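# Shape flow for the 4x4x4 test inputs: hidden_states is projected and split
# into (bs=4, heads=4, seq=4, head_dim=1). With head_dim = hidden_size /
# num_attention_heads = 1, math.sqrt(self.attention_head_size) is 1.0, which
# is why the compiled kernels above multiply the scores by the constant 1.0.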
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.self_attention = BertSelfAttention(config)
        self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attention_dropout = nn.Dropout(config.hidden_dropout_prob)
        self.interm_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.interm_af = F.gelu
        self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.out_dropout = nn.Dropout(config.hidden_dropout_prob)
def add_norm(self, input, output, dense_layer, dropout, ln_layer):
"""
input: the input
output: the input that requires the sublayer to transform
dense_layer, dropput: the sublayer
ln_layer: layer norm that takes input+sublayer(output)
"""
sublayer = dropout(dense_layer(output))
return ln_layer(input + sublayer)
def forward(self, hidden_states, attention_mask):
attn_values = self.self_attention(hidden_states, attention_mask)
bs = hidden_states.size(0)
attn_values = attn_values.transpose(1, 2).contiguous().view(bs, -1,
self.self_attention.all_head_size)
        hidden_states = self.add_norm(hidden_states, attn_values,
            self.attention_dense, self.attention_dropout, self.attention_layer_norm)
        interim_hidden_states = self.interm_af(self.interm_dense(hidden_states))
        hidden_states = self.add_norm(hidden_states, interim_hidden_states,
            self.out_dense, self.out_dropout, self.out_layer_norm)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
    return [[], {'config': _mock_config(num_attention_heads=4, hidden_size=4,
        attention_probs_dropout_prob=0.5, layer_norm_eps=1,
        hidden_dropout_prob=0.5, intermediate_size=4)}]
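# Illustrative usage under the paritybench harness (assumes the _mock_config
# helper imported above; not part of the model itself):
#   layer = BertLayer(**get_init_inputs()[1])
#   out = layer(*get_inputs())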
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -10000.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl.int1)
    tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -10000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x2, tmp20, xmask)
tl.store(out_ptr1 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = xindex
x5 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x4, xmask)
tmp6 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -10000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1.0
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
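# Same kernels as in the decorated module above, with the autotuning metadata
# stripped. call() below chains: QKV projections (extern mm) -> bias+transpose
# clones -> bmm for raw scores -> two-pass masked softmax -> bmm with values ->
# output projection (addmm) -> fused add+LayerNorm -> GELU MLP -> fused
# add+LayerNorm.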
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf2
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
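# buf5 now holds the raw attention scores from the batched matmul of two
# bias-added projection streams (the query/key naming follows the traced
# module's argument order).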
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_eq_1[grid(64)](primals_8, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_8
buf7 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5,
buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf9, buf6,
buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf10, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf13,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf13,
buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_12
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf17)
del primals_14
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_7[grid(64)](buf17, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_16
buf21 = buf15
del buf15
buf22 = buf14
del buf14
triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_10[grid(64)](buf20, buf21, buf22,
primals_17, primals_18, buf23, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf21
del buf22
del primals_18
return (buf23, primals_1, primals_11, primals_17, buf6, buf9,
reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13,
reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf17,
reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf20, primals_15,
primals_13, primals_9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1,
1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transform(self, x, linear_layer):
bs, seq_len = x.shape[:2]
proj = linear_layer(x)
proj = proj.view(bs, seq_len, self.num_attention_heads, self.
attention_head_size)
proj = proj.transpose(1, 2)
return proj
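# Scaled dot-product attention; positions whose additive mask equals
# -10000.0 are re-filled with -10000.0 before the softmax.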
def attention(self, key, query, value, attention_mask):
att_scores = query @ key.transpose(-2, -1) / math.sqrt(self.
attention_head_size)
att_scores.masked_fill_(attention_mask == -10000.0, value=-10000.0)
att_scores = F.softmax(att_scores, dim=-1)
att_scores = self.dropout(att_scores)
return att_scores @ value
def forward(self, hidden_states, attention_mask):
key_layer = self.transform(hidden_states, self.key)
value_layer = self.transform(hidden_states, self.value)
query_layer = self.transform(hidden_states, self.query)
attn_value = self.attention(key_layer, query_layer, value_layer,
attention_mask)
return attn_value
class BertLayerNew(nn.Module):
def __init__(self, config):
super().__init__()
self.self_attention = BertSelfAttention(config)
self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size
)
self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps=
config.layer_norm_eps)
self.attention_dropout = nn.Dropout(config.hidden_dropout_prob)
self.interm_dense = nn.Linear(config.hidden_size, config.
intermediate_size)
self.interm_af = F.gelu
self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size
)
self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.out_dropout = nn.Dropout(config.hidden_dropout_prob)
def add_norm(self, input, output, dense_layer, dropout, ln_layer):
"""
input: the residual input (added back after the sublayer output)
output: the tensor the sublayer should transform
dense_layer, dropout: the sublayer
ln_layer: layer norm applied to input + sublayer(output)
"""
sublayer = dropout(dense_layer(output))
return ln_layer(input + sublayer)
def forward(self, input_0, input_1):
primals_2 = self.self_attention.query.weight
primals_3 = self.self_attention.query.bias
primals_4 = self.self_attention.key.weight
primals_5 = self.self_attention.key.bias
primals_6 = self.self_attention.value.weight
primals_7 = self.self_attention.value.bias
primals_9 = self.attention_dense.weight
primals_10 = self.attention_dense.bias
primals_11 = self.attention_layer_norm.weight
primals_12 = self.attention_layer_norm.bias
primals_13 = self.interm_dense.weight
primals_14 = self.interm_dense.bias
primals_15 = self.out_dense.weight
primals_16 = self.out_dense.bias
primals_17 = self.out_layer_norm.weight
primals_18 = self.out_layer_norm.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
| Abhimanyu08/minbert-assignment | BertLayer | false | 13,211 | [
"Apache-2.0"
]
| 0 | 1610364213b1aab2d5446175dffabd7e1742833b | https://github.com/Abhimanyu08/minbert-assignment/tree/1610364213b1aab2d5446175dffabd7e1742833b |
BertOutput | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ai/cai32p2ssjvpyulvuzcicdszqe3thbavgxn4jeed6uatjnl7yq2s.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/dt/cdtfyiepqseugx5m3udiopa26uo6fdp2fyvmfcoipxuqyqqnb2l6.py
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# hidden_states_2 => add_1, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1.0
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/mn/cmntyljhuirhsdjg2yosgzllpkpxqedxgoyk6gunquq2rf3kl7u5.py
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# hidden_states_2 => add_1, add_2, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {})
triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_5, primals_6, buf4, 256, grid=grid(256), stream=stream0)
del buf2
del buf3
del primals_6
return (buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.utils.checkpoint
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4,
layer_norm_eps=1, hidden_dropout_prob=0.5)}]
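# Hypothetical smoke test (added for illustration, not part of the original
# export); assumes `_paritybench_helpers` is importable so _mock_config works.
if __name__ == "__main__":
    module = BertOutput(get_init_inputs()[1]["config"])
    module.eval()  # disable dropout for a deterministic check
    with torch.no_grad():
        out = module(*get_inputs())
    print(out.shape)  # expected: torch.Size([4, 4, 4, 4])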
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
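# Fused bias + residual add: in_out_ptr0 (the dense-layer matmul) gets its
# bias (in_ptr0) and the residual input_tensor (in_ptr1) added in place.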
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
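# LayerNorm statistics over the last (size-4) dimension: per-row mean and
# rsqrt(var + eps), with the traced eps (1.0) folded in as a constant.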
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1.0
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
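# LayerNorm application: normalize with the precomputed statistics, then
# scale by the LayerNorm weight (in_ptr3) and shift by its bias (in_ptr4).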
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
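# Host-side wrapper: one external matmul for the dense projection, followed
# by the fused add and the two LayerNorm kernels above.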
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3,
primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf2
del buf3
del primals_6
return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1
class BertOutputNew(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_5 = self.LayerNorm.weight
primals_6 = self.LayerNorm.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| Clemens123/transformers | BertOutput | false | 13,212 | [
"Apache-2.0"
]
| 0 | 22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 | https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 |
DeiTSelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del buf9
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
import torch.utils.checkpoint
class DeiTSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5)}]
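# Hypothetical smoke test (added for illustration, not part of the original
# export); assumes `_paritybench_helpers` is importable so _mock_config works.
if __name__ == "__main__":
    module = DeiTSelfAttention(get_init_inputs()[1]["config"])
    module.eval()  # disable attention dropout
    with torch.no_grad():
        context, = module(*get_inputs())
    print(context.shape)  # expected: torch.Size([4, 4, 4])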
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
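# Adds the projection bias and folds in the 1/sqrt(head_size) attention
# scale (1.0 for the traced head size of 1) while permuting into
# (batch, head, seq, head_size) layout.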
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
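# Numerically stable softmax numerator: subtract the per-row max of the
# attention scores, then exponentiate.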
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
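# Softmax normalization with a guard: rows whose scores are all -inf
# (fully masked) are zeroed instead of producing NaN from 0/0.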
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
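# Adds the value-projection bias while permuting into attention layout;
# no scale is applied on this stream.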
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
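# Materializes permute(0, 2, 1, 3) of the attention output as a contiguous
# tensor ahead of the final reshape back to (batch, seq, hidden).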
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
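        # Our annotation: the bmm below computes the raw attention scores
        # Q @ K^T per head; the 1/sqrt(head_size) scaling (1.0 here, since
        # head_size is 1) appears to be folded into triton_poi_fused_0.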
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf9
    return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf7,
        reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
class DeiTSelfAttentionNew(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
                f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.'
)
self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
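# A minimal smoke test for the compiled module (our sketch; the helper name
# and the mock config values are assumptions chosen to match the (4, 4, 4)
# shapes asserted in call(), mirroring the paritybench harness used for the
# other records in this file; requires a CUDA device):
def _demo_deit_self_attention():
    from _paritybench_helpers import _mock_config
    module = DeiTSelfAttentionNew(_mock_config(hidden_size=4,
        num_attention_heads=4, attention_probs_dropout_prob=0.5)).cuda()
    out = module(torch.rand(4, 4, 4, device='cuda'))
    assert out.shape == (4, 4, 4)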
| Clemens123/transformers | DeiTSelfAttention | false | 13,213 | ["Apache-2.0"] | 0 | 22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 | https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 |
IBertClassificationHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [hidden_states_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# hidden_states_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py
# Topologically Sorted Source Nodes: [hidden_states_3], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# hidden_states_3 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [hidden_states_3], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.utils.checkpoint
class IBertClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
hidden_states = features[:, 0, :]
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4,
        hidden_dropout_prob=0.5, num_labels=4)}]
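# A quick shape check (our sketch, reusing the mock config values from
# get_init_inputs above; dropout only affects values, not shapes):
def _demo_ibert_classification_head():
    head = IBertClassificationHead(_mock_config(hidden_size=4,
        hidden_dropout_prob=0.5, num_labels=4))
    logits = head(torch.rand(4, 4, 4, 4))
    assert logits.shape == (4, 4, 4)  # features[:, 0, :] drops dim 1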
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
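    # Our annotation: copies features[:, 0] (the first-token slice) of the
    # (4, 4, 4, 4) input into a contiguous (4, 4, 4) buffer.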
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
del primals_5
    return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4)
class IBertClassificationHeadNew(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_4 = self.out_proj.weight
primals_5 = self.out_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Clemens123/transformers | IBertClassificationHead | false | 13,214 | ["Apache-2.0"] | 0 | 22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 | https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 |
PropMaxPool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/yr/cyrsfiqcqep5ianv6lfxy43inavacsrots2hnf6qyokhutlu5ocy.py
# Topologically Sorted Source Nodes: [map_h], Original ATen: [aten.new_zeros]
# Source node to ATen node mapping:
# map_h => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_new_zeros_0 = async_compile.triton('triton_poi_fused_new_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4j/c4j3sks32tuyvczpdbb5ml4uucqn7a5ivtxxzqksz6nmtw26i7ck.py
# Topologically Sorted Source Nodes: [map_h, setitem], Original ATen: [aten.new_zeros, aten.index_put]
# Source node to ATen node mapping:
# map_h => full_default
# setitem => index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%full_default, [None, None, %lift_fresh_copy, %lift_fresh_copy_1], %arg0_1), kwargs = {})
triton_poi_fused_index_put_new_zeros_1 = async_compile.triton('triton_poi_fused_index_put_new_zeros_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_new_zeros_1', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_new_zeros_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp11 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 0, tl.int64)
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.where(tmp8, tmp1, tmp7)
tmp10 = tl.where(tmp2, tmp6, tmp9)
tl.store(out_ptr0 + ((5*tmp10) + (16*x1)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/mn/cmnw3c5ofhyqdc5trwm3tlaqc5opavinp7cyamrmazw7wp3bfyfc.py
# Topologically Sorted Source Nodes: [x, setitem_2], Original ATen: [aten.max_pool2d_with_indices, aten.index_put]
# Source node to ATen node mapping:
# setitem_2 => index_put_2
# x => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 2], [1, 1], [0, 0], [1, 1], False), kwargs = {})
# %index_put_2 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put, [None, None, %lift_fresh_copy_6, %lift_fresh_copy_7], %squeeze), kwargs = {})
triton_poi_fused_index_put_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_index_put_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_max_pool2d_with_indices_2', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (4*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = x0
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tl.full([1], 2, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tl.where(tmp7, tmp4, tmp6)
tmp9 = tl.full([1], 0, tl.int64)
tmp10 = tl.where(tmp5, tmp9, tmp8)
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = tl.where(tmp7, tmp6, tmp11)
tmp13 = tl.where(tmp5, tmp4, tmp12)
tl.store(out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr1 + (tmp13 + (4*tmp10) + (16*x1)), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/yq/cyqh5ustgt67wpxctoxlywkv2o5mrdfzeya4yhalzufbrbugsngf.py
# Topologically Sorted Source Nodes: [x_1, setitem_4], Original ATen: [aten.max_pool2d_with_indices, aten.index_put]
# Source node to ATen node mapping:
# setitem_4 => index_put_4
# x_1 => _low_memory_max_pool2d_with_offsets_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze_1, [1, 2], [1, 1], [0, 0], [1, 1], False), kwargs = {})
# %index_put_4 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_2, [None, None, %lift_fresh_copy_12, %lift_fresh_copy_13], %squeeze_2), kwargs = {})
triton_poi_fused_index_put_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_index_put_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_max_pool2d_with_indices_3', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (3*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (3*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = x0
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tl.full([1], 0, tl.int64)
tmp7 = tl.where(tmp5, tmp6, tmp4)
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tl.store(out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr1 + (tmp10 + (4*tmp7) + (16*x1)), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/uz/cuzjdptmtn5kea3e2iifrdhpwfjgnfopv57jwvxzsjdrtwhs44zl.py
# Topologically Sorted Source Nodes: [setitem_6], Original ATen: [aten.index_put]
# Source node to ATen node mapping:
# setitem_6 => index_put_6
# Graph fragment:
# %index_put_6 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_4, [None, None, %full_default_2, %full_default_3], %squeeze_4), kwargs = {})
triton_poi_fused_index_put_4 = async_compile.triton('triton_poi_fused_index_put_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_4', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (3 + (16*x0)), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/t3/ct374civqbf45jfae4cqrvjb5bgztkdh72sqcme5rkivsalnsrib.py
# Topologically Sorted Source Nodes: [map_mask], Original ATen: [aten.new_zeros]
# Source node to ATen node mapping:
# map_mask => full_default_1
# Graph fragment:
# %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_new_zeros_5 = async_compile.triton('triton_poi_fused_new_zeros_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_new_zeros_5(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5h/c5hygty5oqrsyi3tzi7dvcz2rqbwh6olwszwvqtkgy7lcch4dkra.py
# Topologically Sorted Source Nodes: [map_mask, getitem, iadd, setitem_1], Original ATen: [aten.new_zeros, aten.index, aten.add, aten.index_put]
# Source node to ATen node mapping:
# getitem => index
# iadd => add
# map_mask => full_default_1
# setitem_1 => index_put_1
# Graph fragment:
# %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%full_default_1, [None, None, %lift_fresh_copy_2, %lift_fresh_copy_3]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%index, 1), kwargs = {})
# %index_put_1 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%full_default_1, [None, None, %lift_fresh_copy_4, %lift_fresh_copy_5], %add), kwargs = {})
triton_poi_fused_add_index_index_put_new_zeros_6 = async_compile.triton('triton_poi_fused_add_index_index_put_new_zeros_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_index_put_new_zeros_6', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_index_index_put_new_zeros_6(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 0, tl.int64)
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.where(tmp8, tmp1, tmp7)
tmp10 = tl.where(tmp2, tmp6, tmp9)
tmp11 = 1.0
tl.store(out_ptr0 + ((5*tmp10) + (16*x1)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/fd/cfdxs32mvlgp43vnxwyicq5g427s7tx3ttvnz2xy5x3sbv27q767.py
# Topologically Sorted Source Nodes: [getitem_1, iadd_1, setitem_3], Original ATen: [aten.index, aten.add, aten.index_put]
# Source node to ATen node mapping:
# getitem_1 => index_1
# iadd_1 => add_1
# setitem_3 => index_put_3
# Graph fragment:
# %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%index_put_1, [None, None, %lift_fresh_copy_8, %lift_fresh_copy_9]), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%index_1, 1), kwargs = {})
# %index_put_3 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_1, [None, None, %lift_fresh_copy_10, %lift_fresh_copy_11], %add_1), kwargs = {})
triton_poi_fused_add_index_index_put_7 = async_compile.triton('triton_poi_fused_add_index_index_put_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_index_put_7', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_index_index_put_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3)
tmp0 = x0
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.where(tmp4, tmp1, tmp3)
tmp6 = tl.full([1], 0, tl.int64)
tmp7 = tl.where(tmp2, tmp6, tmp5)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tl.where(tmp4, tmp3, tmp8)
tmp10 = tl.where(tmp2, tmp1, tmp9)
tmp11 = tl.load(in_ptr0 + (tmp10 + (4*tmp7) + (16*x1)), xmask, eviction_policy='evict_last')
tmp12 = 1.0
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (tmp10 + (4*tmp7) + (16*x1)), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/a5/ca5fhk4lipv75hpghbgnj6yrdhvhoama6yivqpbf4o22p4u7ed3a.py
# Topologically Sorted Source Nodes: [getitem_2, iadd_2, setitem_5], Original ATen: [aten.index, aten.add, aten.index_put]
# Source node to ATen node mapping:
# getitem_2 => index_2
# iadd_2 => add_2
# setitem_5 => index_put_5
# Graph fragment:
# %index_2 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%index_put_3, [None, None, %lift_fresh_copy_14, %lift_fresh_copy_15]), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%index_2, 1), kwargs = {})
# %index_put_5 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_3, [None, None, %lift_fresh_copy_16, %lift_fresh_copy_17], %add_2), kwargs = {})
triton_poi_fused_add_index_index_put_8 = async_compile.triton('triton_poi_fused_add_index_index_put_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_index_put_8', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_index_index_put_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
tmp0 = x0
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 0, tl.int64)
tmp4 = tl.where(tmp2, tmp3, tmp1)
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tl.full([1], 3, tl.int64)
tmp7 = tl.where(tmp2, tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (tmp7 + (4*tmp4) + (16*x1)), xmask, eviction_policy='evict_last')
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (tmp7 + (4*tmp4) + (16*x1)), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/iy/ciyhqt7cqfmjh3ei6nfk6noimqsjjaliu4cvw4pukqzzn6qdyusq.py
# Topologically Sorted Source Nodes: [getitem_3, iadd_3, setitem_7], Original ATen: [aten.index, aten.add, aten.index_put]
# Source node to ATen node mapping:
# getitem_3 => index_3
# iadd_3 => add_3
# setitem_7 => index_put_7
# Graph fragment:
# %index_3 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%index_put_5, [None, None, %full_default_4, %full_default_5]), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%index_3, 1), kwargs = {})
# %index_put_7 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_5, [None, None, %full_default_6, %full_default_7], %add_3), kwargs = {})
triton_poi_fused_add_index_index_put_9 = async_compile.triton('triton_poi_fused_add_index_index_put_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_index_put_9', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_index_index_put_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (3 + (16*x0)), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [map_h], Original ATen: [aten.new_zeros]
stream0 = get_raw_stream(0)
triton_poi_fused_new_zeros_0.run(buf2, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [map_h, setitem], Original ATen: [aten.new_zeros, aten.index_put]
triton_poi_fused_index_put_new_zeros_1.run(arg0_1, buf2, 64, grid=grid(64), stream=stream0)
buf0 = empty_strided_cuda((4, 4, 1, 3), (12, 3, 48, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, setitem_2], Original ATen: [aten.max_pool2d_with_indices, aten.index_put]
triton_poi_fused_index_put_max_pool2d_with_indices_2.run(arg0_1, buf0, buf2, 48, grid=grid(48), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, setitem_4], Original ATen: [aten.max_pool2d_with_indices, aten.index_put]
triton_poi_fused_index_put_max_pool2d_with_indices_3.run(buf0, buf1, buf2, 32, grid=grid(32), stream=stream0)
del buf0
# Topologically Sorted Source Nodes: [setitem_6], Original ATen: [aten.index_put]
triton_poi_fused_index_put_4.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
del buf1
buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [map_mask], Original ATen: [aten.new_zeros]
triton_poi_fused_new_zeros_5.run(buf7, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [map_mask, getitem, iadd, setitem_1], Original ATen: [aten.new_zeros, aten.index, aten.add, aten.index_put]
triton_poi_fused_add_index_index_put_new_zeros_6.run(buf7, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [getitem_1, iadd_1, setitem_3], Original ATen: [aten.index, aten.add, aten.index_put]
triton_poi_fused_add_index_index_put_7.run(buf7, buf7, 12, grid=grid(12), stream=stream0)
# Topologically Sorted Source Nodes: [getitem_2, iadd_2, setitem_5], Original ATen: [aten.index, aten.add, aten.index_put]
triton_poi_fused_add_index_index_put_8.run(buf7, buf7, 8, grid=grid(8), stream=stream0)
# Topologically Sorted Source Nodes: [getitem_3, iadd_3, setitem_7], Original ATen: [aten.index, aten.add, aten.index_put]
triton_poi_fused_add_index_index_put_9.run(buf7, buf7, 4, grid=grid(4), stream=stream0)
return (buf2, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
class PropMaxPool(nn.Module):
def __init__(self, cfg):
super(PropMaxPool, self).__init__()
num_layers = cfg.NUM_LAYERS
self.layers = nn.ModuleList([nn.Identity()] + [nn.MaxPool1d(2,
stride=1) for _ in range(num_layers - 1)])
self.num_layers = num_layers
def forward(self, x):
batch_size, hidden_size, num_clips = x.shape
map_h = x.new_zeros(batch_size, hidden_size, num_clips, num_clips)
map_mask = x.new_zeros(batch_size, 1, num_clips, num_clips)
for dig_idx, pool in enumerate(self.layers):
x = pool(x)
start_idxs = [s_idx for s_idx in range(0, num_clips - dig_idx, 1)]
end_idxs = [(s_idx + dig_idx) for s_idx in start_idxs]
map_h[:, :, start_idxs, end_idxs] = x
map_mask[:, :, start_idxs, end_idxs] += 1
return map_h, map_mask
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'cfg': _mock_config(NUM_LAYERS=4)}]
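# A minimal run (our sketch): with NUM_LAYERS=4 and num_clips=4, diagonal k
# of map_h holds the max over each length-(k + 1) clip window, and map_mask
# marks which (start, end) cells were filled.
def _demo_prop_max_pool():
    pool = PropMaxPool(_mock_config(NUM_LAYERS=4))
    map_h, map_mask = pool(torch.rand(4, 4, 4))
    assert map_h.shape == (4, 4, 4, 4)
    assert map_mask.shape == (4, 1, 4, 4)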
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_index_put_new_zeros_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp11 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 0, tl.int64)
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.where(tmp8, tmp1, tmp7)
tmp10 = tl.where(tmp2, tmp6, tmp9)
tl.store(out_ptr0 + (5 * tmp10 + 16 * x1), tmp11, xmask)
@triton.jit
def triton_poi_fused_index_put_max_pool2d_with_indices_2(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 4 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = x0
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tl.full([1], 2, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tl.where(tmp7, tmp4, tmp6)
tmp9 = tl.full([1], 0, tl.int64)
tmp10 = tl.where(tmp5, tmp9, tmp8)
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = tl.where(tmp7, tmp6, tmp11)
tmp13 = tl.where(tmp5, tmp4, tmp12)
tl.store(out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr1 + (tmp13 + 4 * tmp10 + 16 * x1), tmp2, xmask)
@triton.jit
def triton_poi_fused_index_put_max_pool2d_with_indices_3(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 3 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 3 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = x0
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tl.full([1], 0, tl.int64)
tmp7 = tl.where(tmp5, tmp6, tmp4)
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tl.store(out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr1 + (tmp10 + 4 * tmp7 + 16 * x1), tmp2, xmask)
@triton.jit
def triton_poi_fused_index_put_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
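    # Our annotation: the last pooling level reduces each remaining pair to a
    # single max and writes it to map_h[:, :, 0, 3], the full-length
    # (start=0, end=3) cell of each 4x4 proposal map.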
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (3 + 16 * x0), tmp2, xmask)
@triton.jit
def triton_poi_fused_new_zeros_5(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_index_index_put_new_zeros_6(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 0, tl.int64)
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.where(tmp8, tmp1, tmp7)
tmp10 = tl.where(tmp2, tmp6, tmp9)
tmp11 = 1.0
tl.store(out_ptr0 + (5 * tmp10 + 16 * x1), tmp11, xmask)
@triton.jit
def triton_poi_fused_add_index_index_put_7(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
tmp0 = x0
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.where(tmp4, tmp1, tmp3)
tmp6 = tl.full([1], 0, tl.int64)
tmp7 = tl.where(tmp2, tmp6, tmp5)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tl.where(tmp4, tmp3, tmp8)
tmp10 = tl.where(tmp2, tmp1, tmp9)
tmp11 = tl.load(in_ptr0 + (tmp10 + 4 * tmp7 + 16 * x1), xmask,
eviction_policy='evict_last')
tmp12 = 1.0
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (tmp10 + 4 * tmp7 + 16 * x1), tmp13, xmask)
@triton.jit
def triton_poi_fused_add_index_index_put_8(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
tmp0 = x0
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 0, tl.int64)
tmp4 = tl.where(tmp2, tmp3, tmp1)
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tl.full([1], 3, tl.int64)
tmp7 = tl.where(tmp2, tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (tmp7 + 4 * tmp4 + 16 * x1), xmask,
eviction_policy='evict_last')
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (tmp7 + 4 * tmp4 + 16 * x1), tmp10, xmask)
@triton.jit
def triton_poi_fused_add_index_index_put_9(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
    tmp0 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (3 + 16 * x0), tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_new_zeros_0[grid(256)](buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
triton_poi_fused_index_put_new_zeros_1[grid(64)](arg0_1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 1, 3), (12, 3, 48, 1), torch.float32)
triton_poi_fused_index_put_max_pool2d_with_indices_2[grid(48)](arg0_1,
buf0, buf2, 48, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 32, 1), torch.float32)
triton_poi_fused_index_put_max_pool2d_with_indices_3[grid(32)](buf0,
buf1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1)
del buf0
triton_poi_fused_index_put_4[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf1
buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
triton_poi_fused_new_zeros_5[grid(64)](buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
triton_poi_fused_add_index_index_put_new_zeros_6[grid(16)](buf7, 16,
XBLOCK=16, num_warps=1, num_stages=1)
triton_poi_fused_add_index_index_put_7[grid(12)](buf7, buf7, 12,
XBLOCK=16, num_warps=1, num_stages=1)
triton_poi_fused_add_index_index_put_8[grid(8)](buf7, buf7, 8,
XBLOCK=8, num_warps=1, num_stages=1)
triton_poi_fused_add_index_index_put_9[grid(4)](buf7, buf7, 4,
XBLOCK=4, num_warps=1, num_stages=1)
return buf2, buf7
class PropMaxPoolNew(nn.Module):
def __init__(self, cfg):
super(PropMaxPoolNew, self).__init__()
num_layers = cfg.NUM_LAYERS
self.layers = nn.ModuleList([nn.Identity()] + [nn.MaxPool1d(2,
stride=1) for _ in range(num_layers - 1)])
self.num_layers = num_layers
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| MicroTensor-ai/episodic-memory | PropMaxPool | false | 13,215 | [
"MIT"
]
| 0 | 295a3752ab94c7a6f45355aa2c54bffbf84b574f | https://github.com/MicroTensor-ai/episodic-memory/tree/295a3752ab94c7a6f45355aa2c54bffbf84b574f |
StructuredAutoencoderNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/vn/cvnovvgrk47qviqcskah6z34pushumedjq2l7peskzvjzbpu2sis.py
# Topologically Sorted Source Nodes: [X_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# X_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %primals_4), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [X_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf1, primals_2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [X_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
from collections import OrderedDict
class StructuredAutoencoderNet(nn.Module):
def __init__(self, p, encoder_config, decoder_config, dropout_rate=0):
super().__init__()
self.p = p
self.encoder_config = encoder_config
self.decoder_config = decoder_config
self.weights_layer = []
index = 0
self.encoder_layer = []
for i in range(len(self.encoder_config['dimension']) - 1):
self.encoder_layer.append(('linear' + str(index), nn.Linear(int
(self.encoder_config['dimension'][i]), int(self.
encoder_config['dimension'][i + 1]))))
if i != len(self.encoder_config['dimension']) - 2:
self.encoder_layer.append(('Sigmoid' + str(index), nn.
Sigmoid()))
self.encoder_layer.append(('dropout' + str(index), nn.
Dropout(p=dropout_rate)))
index += 1
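        # Note: the labels generated above are 'linear0', 'linear1', ..., so
        # the bare 'linear' comparison below (and in encode/decode) never
        # matches; the tied-weight path is effectively dead and the
        # nn.Linear modules are used directly.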
for index, layer in enumerate(self.encoder_layer):
if layer[0] == 'linear':
self.weights_layer.append(torch.nn.Parameter(layer[1].weight))
self.encoder_layer[index][1].weight = self.weights_layer[-1]
index = 0
self.decoder_layer = []
for i in range(len(self.decoder_config['dimension']) - 1):
if i != 0:
self.decoder_layer.append(('dropout' + str(index), nn.
Dropout(p=dropout_rate)))
self.decoder_layer.append(('linear' + str(index), nn.Linear(int
(self.decoder_config['dimension'][i]), int(self.
decoder_config['dimension'][i + 1]))))
if i != len(self.decoder_config['dimension']) - 2:
self.decoder_layer.append(('Sigmoid' + str(index), nn.
Sigmoid()))
index += 1
self.encoder_net = nn.Sequential(OrderedDict(self.encoder_layer))
self.decoder_net = nn.Sequential(OrderedDict(self.decoder_layer))
def encode(self, X, mask):
index = 0
for layer in self.encoder_layer:
if layer[0] == 'linear':
X = torch.nn.functional.linear(X, self.weights_layer[index])
index += 1
else:
X = layer[1](X)
X = X * mask
return X
def decode(self, X):
index = len(self.weights_layer) - 1
for layer in self.decoder_layer:
if layer[0] == 'linear':
X = torch.nn.functional.linear(X, self.weights_layer[index].t()
)
index -= 1
else:
X = layer[1](X)
return X
def forward(self, X, mask):
X = self.encode(X, mask)
X = self.decode(X)
return X
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'p': 4, 'encoder_config': _mock_config(dimension=[4, 4]),
'decoder_config': _mock_config(dimension=[4, 4])}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
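# Fused kernel: finishes the encoder linear by adding the bias to the
# matmul result in place, then multiplies elementwise by the mask,
# i.e. out = (X @ W_enc.T + b_enc) * mask.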
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
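# call() traces forward(): encoder matmul (mm), the fused bias-add and
# mask multiply above, then the decoder as a single addmm with its bias.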
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](buf1, primals_2, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_6
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_5
class StructuredAutoencoderNetNew(nn.Module):
def __init__(self, p, encoder_config, decoder_config, dropout_rate=0):
super().__init__()
self.p = p
self.encoder_config = encoder_config
self.decoder_config = decoder_config
self.weights_layer = []
index = 0
self.encoder_layer = []
for i in range(len(self.encoder_config['dimension']) - 1):
self.encoder_layer.append(('linear' + str(index), nn.Linear(int
(self.encoder_config['dimension'][i]), int(self.
encoder_config['dimension'][i + 1]))))
if i != len(self.encoder_config['dimension']) - 2:
self.encoder_layer.append(('Sigmoid' + str(index), nn.
Sigmoid()))
self.encoder_layer.append(('dropout' + str(index), nn.
Dropout(p=dropout_rate)))
index += 1
for index, layer in enumerate(self.encoder_layer):
if layer[0] == 'linear':
self.weights_layer.append(torch.nn.Parameter(layer[1].weight))
self.encoder_layer[index][1].weight = self.weights_layer[-1]
index = 0
self.decoder_layer = []
for i in range(len(self.decoder_config['dimension']) - 1):
if i != 0:
self.decoder_layer.append(('dropout' + str(index), nn.
Dropout(p=dropout_rate)))
self.decoder_layer.append(('linear' + str(index), nn.Linear(int
(self.decoder_config['dimension'][i]), int(self.
decoder_config['dimension'][i + 1]))))
if i != len(self.decoder_config['dimension']) - 2:
self.decoder_layer.append(('Sigmoid' + str(index), nn.
Sigmoid()))
index += 1
self.encoder_net = nn.Sequential(OrderedDict(self.encoder_layer))
self.decoder_net = nn.Sequential(OrderedDict(self.decoder_layer))
def encode(self, X, mask):
index = 0
for layer in self.encoder_layer:
if layer[0] == 'linear':
X = torch.nn.functional.linear(X, self.weights_layer[index])
index += 1
else:
X = layer[1](X)
X = X * mask
return X
def decode(self, X):
index = len(self.weights_layer) - 1
for layer in self.decoder_layer:
if layer[0] == 'linear':
X = torch.nn.functional.linear(X, self.weights_layer[index].t()
)
index -= 1
else:
X = layer[1](X)
return X
def forward(self, input_0, input_1):
primals_1 = self.encoder_net.linear0.weight
primals_2 = self.encoder_net.linear0.bias
primals_5 = self.decoder_net.linear0.weight
primals_6 = self.decoder_net.linear0.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| CHuanSite/smautoPy | StructuredAutoencoderNet | false | 13,216 | [
"MIT"
]
| 0 | 46c6b2088fc3c488870cee2ab88ac9f1ce779c0d | https://github.com/CHuanSite/smautoPy/tree/46c6b2088fc3c488870cee2ab88ac9f1ce779c0d |
LxmertCrossAttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py
# Topologically Sorted Source Nodes: [add, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# hidden_states_2 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/i6/ci6ua4lfqzz3v6lbsh75noa7k5ird3udb6b5bjh7gxx4qxuz7gz3.py
# Topologically Sorted Source Nodes: [add, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# hidden_states_2 => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-12), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_11), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-12
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_8, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_10
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(buf11, primals_3, buf12, buf13, 16, grid=grid(16), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(buf11, primals_3, buf12, buf13, primals_11, primals_12, buf14, 64, grid=grid(64), stream=stream0)
del buf12
del buf13
del primals_12
return (buf14, primals_3, primals_11, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
import torch.utils.checkpoint
class LxmertAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.head_size = self.num_attention_heads * self.attention_head_size
if ctx_dim is None:
ctx_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.head_size)
self.key = nn.Linear(ctx_dim, self.head_size)
self.value = nn.Linear(ctx_dim, self.head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, context, attention_mask=None,
output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
class LxmertAttentionOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LxmertCrossAttentionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.att = LxmertAttention(config)
self.output = LxmertAttentionOutput(config)
def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None,
output_attentions=False):
output = self.att(input_tensor, ctx_tensor, ctx_att_mask,
output_attentions=output_attentions)
if output_attentions:
attention_probs = output[1]
attention_output = self.output(output[0], input_tensor)
outputs = (attention_output, attention_probs
) if output_attentions else (attention_output,)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
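# Adds the Q/K projection bias and applies the folded attention scale
# (1.0 here, since the per-head size is 4 / 4 = 1) while copying into
# the (batch, head, seq, head_dim) layout expected by the bmm.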
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
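# Softmax, step 1: subtract the row max over the last dimension
# (size 4) and exponentiate, for numerical stability.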
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
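# Softmax, step 2: divide each exp value by its row sum; rows whose raw
# scores are all -inf (fully masked) are forced to 0.0 to avoid NaNs.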
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
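# Adds the value-projection bias while copying into the per-head
# (batch, head, seq, head_dim) layout for the attention-weighted bmm.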
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
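# Contiguous copy implementing permute(0, 2, 1, 3) on the attention
# output, merging the heads back into the hidden dimension.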
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
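# LayerNorm, step 1: per-token mean and (biased) variance of the
# residual sum dense(attention_out) + input_tensor over the hidden dim.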
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
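# LayerNorm, step 2: normalize the residual sum with rsqrt(var + 1e-12),
# then scale by the LayerNorm weight (in_ptr4) and shift by its bias
# (in_ptr5).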
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-12
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
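# call() runs the full cross-attention layer: Q/K/V projections (mm),
# scaled dot-product attention with masked-row handling, the output
# projection (addmm), and the residual LayerNorm.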
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_10, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_10
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_3,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_3,
buf12, buf13, primals_11, primals_12, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_12
return buf14, primals_3, primals_11, reinterpret_tensor(primals_6, (16,
4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_9
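# Editor's note: only buf14 (the first returned tensor) is the forward output;
# the remaining tensors are activations that inductor keeps alive for the
# backward pass of this compiled graph.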
class LxmertAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.head_size = self.num_attention_heads * self.attention_head_size
if ctx_dim is None:
ctx_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.head_size)
self.key = nn.Linear(ctx_dim, self.head_size)
self.value = nn.Linear(ctx_dim, self.head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, context, attention_mask=None,
output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
class LxmertAttentionOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LxmertCrossAttentionLayerNew(nn.Module):
def __init__(self, config):
super().__init__()
self.att = LxmertAttention(config)
self.output = LxmertAttentionOutput(config)
def forward(self, input_0, input_1):
primals_1 = self.att.query.weight
primals_2 = self.att.query.bias
primals_4 = self.att.key.weight
primals_5 = self.att.key.bias
primals_7 = self.att.value.weight
primals_8 = self.att.value.bias
primals_9 = self.output.dense.weight
primals_10 = self.output.dense.bias
primals_11 = self.output.LayerNorm.weight
primals_12 = self.output.LayerNorm.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
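# Usage sketch (editor's note, not part of the record): given a config that
# exposes hidden_size=4, num_attention_heads=4 and the two dropout
# probabilities, the compiled layer runs on CUDA tensors:
#   layer = LxmertCrossAttentionLayerNew(config).cuda()
#   out = layer(torch.rand(4, 4, 4, device='cuda'),
#               torch.rand(4, 4, 4, device='cuda'))  # -> (4, 4, 4)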
| Clemens123/transformers | LxmertCrossAttentionLayer | false | 13,217 | ["Apache-2.0"] | 0 | 22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 | https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 |
MPNetSelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
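# Editor's note: the kernel above fuses the Q/K bias add with the attention
# scale. The multiply by 1.0 is the folded 1/sqrt(attention_head_size); with
# hidden_size=4 split across 4 heads the head size is 1, so the scale
# degenerates to exactly 1.0.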
# kernel path: runs/run_shard_9/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
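# Editor's note: the kernel above finishes the softmax while guarding
# fully-masked rows. A row whose logits are all -inf would otherwise yield
# exp/sum = 0/0; the chained OR-reductions over "logit != -inf" detect such
# rows, and the tl.where writes 0.0 there instead, mirroring the
# aten.logical_not/any/where decomposition listed in the graph fragment.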
# kernel path: runs/run_shard_9/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
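# Editor's note: same bias-add and head-split layout transform as
# triton_poi_fused_0, but without the folded attention scale -- this one
# prepares the value projection.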
# kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [c_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# c_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
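# Editor's note: this clone materializes the eager code's
# permute(0, 2, 1, 3).contiguous(), moving the head dimension back so the
# heads can be merged by a simple view before the output projection.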
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [c_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [o], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_9
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
import torch.utils.checkpoint
class MPNetSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.q = nn.Linear(config.hidden_size, self.all_head_size)
self.k = nn.Linear(config.hidden_size, self.all_head_size)
self.v = nn.Linear(config.hidden_size, self.all_head_size)
self.o = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask=None, head_mask=None,
position_bias=None, output_attentions=False, **kwargs):
q = self.q(hidden_states)
k = self.k(hidden_states)
v = self.v(hidden_states)
q = self.transpose_for_scores(q)
k = self.transpose_for_scores(k)
v = self.transpose_for_scores(v)
attention_scores = torch.matmul(q, k.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if position_bias is not None:
attention_scores += position_bias
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
c = torch.matmul(attention_probs, v)
c = c.permute(0, 2, 1, 3).contiguous()
new_c_shape = c.size()[:-2] + (self.all_head_size,)
c = c.view(*new_c_shape)
o = self.o(c)
outputs = (o, attention_probs) if output_attentions else (o,)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5)}]
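# Smoke-test sketch (editor's note, relying on the _mock_config helper above):
#   attn = MPNetSelfAttention(get_init_inputs()[1]['config'])
#   (out,) = attn(*get_inputs())  # out.shape == torch.Size([4, 4, 4])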
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_9
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_8
class MPNetSelfAttentionNew(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.q = nn.Linear(config.hidden_size, self.all_head_size)
self.k = nn.Linear(config.hidden_size, self.all_head_size)
self.v = nn.Linear(config.hidden_size, self.all_head_size)
self.o = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0):
primals_1 = self.q.weight
primals_2 = self.q.bias
primals_4 = self.k.weight
primals_5 = self.k.bias
primals_6 = self.v.weight
primals_7 = self.v.bias
primals_8 = self.o.weight
primals_9 = self.o.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
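# Usage sketch (editor's note): unlike the reference module above, the
# compiled variant takes a single CUDA tensor and always returns just the
# output projection (no attention probabilities):
#   attn = MPNetSelfAttentionNew(config).cuda()
#   out = attn(torch.rand(4, 4, 4, device='cuda'))  # -> (4, 4, 4)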
| Clemens123/transformers | MPNetSelfAttention | false | 13,218 | ["Apache-2.0"] | 0 | 22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 | https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 |
BertOutAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attention_scores => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ug/cug36vlqloqzxlxqd52ctfz6yoxrb4p5b2ayo3atwmh6vhn2ajrl.py
# Topologically Sorted Source Nodes: [attention_scores_1, attention_probs], Original ATen: [aten.div, aten._softmax]
# Source node to ATen node mapping:
# attention_probs => exp
# attention_scores_1 => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_div_1 = async_compile.triton('triton_poi_fused__softmax_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x2 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp16, xmask)
''', device_str='cuda')
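# Editor's note: the kernel above writes two buffers -- out_ptr0 holds the
# scaled scores that the wrapper returns as attention_scores, out_ptr1 the
# max-stabilized exponentials feeding the softmax. The 1/sqrt(head_size)
# divide folds to multiplies by 1.0 because the head size here is 1.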
# kernel path: runs/run_shard_9/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_probs => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/xt/cxtkkmujo4ytg6ycpz5lk5livtstr63pg5nsf5ijewjbtrfrqx6k.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_scores_1, attention_probs], Original ATen: [aten.div, aten._softmax]
triton_poi_fused__softmax_div_1.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
del buf7
buf9 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_8, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
del buf10
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), buf6, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
class BertOutAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
if ctx_dim is None:
ctx_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(ctx_dim, self.all_head_size)
self.value = nn.Linear(ctx_dim, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, context, attention_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_scores
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5)}]
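# Minimal usage sketch mirroring get_inputs()/get_init_inputs() above.
# Dropout is set to 0.0 here (vs. the 0.5 above) so repeated calls are
# deterministic; the config values are otherwise illustrative.
def _bert_out_attention_example():
    cfg = _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.0)
    attn = BertOutAttention(cfg)
    out, scores = attn(torch.rand(4, 4, 4), torch.rand(4, 4, 4))
    assert out.shape == (4, 4, 4)
    assert scores.shape == (4, 4, 4, 4)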
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_div_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x2 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask,
        eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
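# Hedged eager-mode reference for the two softmax kernels above: the first
# pass subtracts the per-row max and exponentiates (for numerical
# stability), the second divides by the per-row sum.
def _softmax_reference(scores):
    e = torch.exp(scores - scores.max(dim=-1, keepdim=True).values)
    return e / e.sum(dim=-1, keepdim=True)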
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_div_1[grid(256)](buf5, buf6, buf7, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
buf9 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf9, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf10
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), buf6, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class BertOutAttentionNew(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
if ctx_dim is None:
ctx_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(ctx_dim, self.all_head_size)
self.value = nn.Linear(ctx_dim, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0, input_1):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_7 = self.value.weight
primals_8 = self.value.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
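# Hedged smoke-test sketch for the compiled wrapper above. `call` allocates
# CUDA buffers, so a CUDA device is required; `SimpleNamespace` stands in
# for the config object (an assumption; any object with these attributes
# works). A full parity test would copy these weights into the eager
# BertOutAttention and compare outputs with dropout disabled.
def _bert_out_attention_new_example():
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.0)
    m = BertOutAttentionNew(cfg).cuda()
    out, scores = m(torch.rand(4, 4, 4, device='cuda'),
        torch.rand(4, 4, 4, device='cuda'))
    assert out.shape == (4, 4, 4)
    assert scores.shape == (4, 4, 4, 4)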
| MarSaKi/Recurrent-VLN-BERT | BertOutAttention | false | 13,219 | ["MIT"] | 0 | c1170f9ca48c234a0c3ded19f9273f2fdcd571d6 | https://github.com/MarSaKi/Recurrent-VLN-BERT/tree/c1170f9ca48c234a0c3ded19f9273f2fdcd571d6 |
IBertLMHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/k2/ck2ensfw3jymkm3sdnn2b3sukex4hedkmwsqjeuwykarc22y3nse.py
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, x_1, x_2], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# tanh => tanh
# x_1 => mul_3
# x_2 => var_mean
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%mul_3, [3]), kwargs = {correction: 0, keepdim: True})
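# Hedged eager-mode reference for the fused graph above: kernel 0 computes
# the biased variance and mean of the tanh-approximate GELU over the last
# dimension (correction=0, matching the graph fragment; assumes a PyTorch
# new enough for var_mean to accept the `correction` keyword).
def _fused_kernel0_reference(x):
    g = 0.5 * x * (1 + torch.tanh(0.7978845608028654 * (x + 0.044715 * x ** 3)))
    return torch.var_mean(g, dim=-1, keepdim=True, correction=0)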
triton_poi_fused_add_mul_native_layer_norm_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_pow_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_pow_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_pow_tanh_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tmp15 = tmp14 * tmp1
tmp16 = tmp14 * tmp14
tmp17 = tmp16 * tmp14
tmp18 = tmp17 * tmp5
tmp19 = tmp14 + tmp18
tmp20 = tmp19 * tmp8
tmp21 = libdevice.tanh(tmp20)
tmp22 = tmp21 + tmp11
tmp23 = tmp15 * tmp22
tmp24 = tmp13 + tmp23
tmp26 = tmp25 * tmp1
tmp27 = tmp25 * tmp25
tmp28 = tmp27 * tmp25
tmp29 = tmp28 * tmp5
tmp30 = tmp25 + tmp29
tmp31 = tmp30 * tmp8
tmp32 = libdevice.tanh(tmp31)
tmp33 = tmp32 + tmp11
tmp34 = tmp26 * tmp33
tmp35 = tmp24 + tmp34
tmp37 = tmp36 * tmp1
tmp38 = tmp36 * tmp36
tmp39 = tmp38 * tmp36
tmp40 = tmp39 * tmp5
tmp41 = tmp36 + tmp40
tmp42 = tmp41 * tmp8
tmp43 = libdevice.tanh(tmp42)
tmp44 = tmp43 + tmp11
tmp45 = tmp37 * tmp44
tmp46 = tmp35 + tmp45
tmp47 = 4.0
tmp48 = tmp46 / tmp47
tmp49 = tmp13 - tmp48
tmp50 = tmp49 * tmp49
tmp51 = tmp23 - tmp48
tmp52 = tmp51 * tmp51
tmp53 = tmp50 + tmp52
tmp54 = tmp34 - tmp48
tmp55 = tmp54 * tmp54
tmp56 = tmp53 + tmp55
tmp57 = tmp45 - tmp48
tmp58 = tmp57 * tmp57
tmp59 = tmp56 + tmp58
tmp60 = tmp59 / tmp47
tl.store(out_ptr0 + (x0), tmp48, xmask)
tl.store(out_ptr1 + (x0), tmp60, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4h/c4hcwvzif5h6zqdmh45kywbrslvqedlkzfpdkfds26puiwek6kyk.py
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, x_1, x_2], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# tanh => tanh
# x_1 => mul_3
# x_2 => add_2, add_3, mul_4, mul_5, rsqrt, sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %getitem_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_4), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_5), kwargs = {})
triton_poi_fused_add_mul_native_layer_norm_pow_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_pow_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_pow_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_pow_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp14 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tmp15 = tmp13 - tmp14
tmp17 = tmp16 + tmp11
tmp18 = libdevice.rsqrt(tmp17)
tmp19 = tmp15 * tmp18
tmp21 = tmp19 * tmp20
tmp23 = tmp21 + tmp22
tl.store(out_ptr0 + (x2), tmp23, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, x_1, x_2], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh, aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_native_layer_norm_pow_tanh_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, x_1, x_2], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh, aten.native_layer_norm]
triton_poi_fused_add_mul_native_layer_norm_pow_tanh_1.run(buf0, buf1, buf2, primals_4, primals_5, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del buf2
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
import torch.utils.checkpoint
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
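# Hedged equivalence check for the tanh approximation above: the constant
# 0.7978845608028654 in the fused kernels is sqrt(2/pi), and PyTorch's
# built-in tanh-approximate GELU (available since torch 1.12) computes the
# same expression.
def _gelu_example():
    x = torch.randn(8)
    ref = torch.nn.functional.gelu(x, approximate='tanh')
    assert torch.allclose(gelu(x), ref, atol=1e-06)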
class IBertLMHead(nn.Module):
"""I-BERT Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
x = self.decoder(x)
return x
def _tie_weights(self):
self.bias = self.decoder.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1,
vocab_size=4)}]
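# Minimal usage sketch mirroring get_inputs()/get_init_inputs() above. The
# head maps hidden states to vocabulary logits, so the last dimension goes
# from hidden_size to vocab_size (both 4 in this mock config).
def _ibert_lm_head_example():
    cfg = _mock_config(hidden_size=4, layer_norm_eps=1, vocab_size=4)
    head = IBertLMHead(cfg)
    logits = head(torch.rand(4, 4, 4, 4))
    assert logits.shape == (4, 4, 4, 4)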
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_pow_tanh_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (1 + 4 * x0), xmask,
        eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (2 + 4 * x0), xmask,
        eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr0 + (3 + 4 * x0), xmask,
        eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tmp15 = tmp14 * tmp1
tmp16 = tmp14 * tmp14
tmp17 = tmp16 * tmp14
tmp18 = tmp17 * tmp5
tmp19 = tmp14 + tmp18
tmp20 = tmp19 * tmp8
tmp21 = libdevice.tanh(tmp20)
tmp22 = tmp21 + tmp11
tmp23 = tmp15 * tmp22
tmp24 = tmp13 + tmp23
tmp26 = tmp25 * tmp1
tmp27 = tmp25 * tmp25
tmp28 = tmp27 * tmp25
tmp29 = tmp28 * tmp5
tmp30 = tmp25 + tmp29
tmp31 = tmp30 * tmp8
tmp32 = libdevice.tanh(tmp31)
tmp33 = tmp32 + tmp11
tmp34 = tmp26 * tmp33
tmp35 = tmp24 + tmp34
tmp37 = tmp36 * tmp1
tmp38 = tmp36 * tmp36
tmp39 = tmp38 * tmp36
tmp40 = tmp39 * tmp5
tmp41 = tmp36 + tmp40
tmp42 = tmp41 * tmp8
tmp43 = libdevice.tanh(tmp42)
tmp44 = tmp43 + tmp11
tmp45 = tmp37 * tmp44
tmp46 = tmp35 + tmp45
tmp47 = 4.0
tmp48 = tmp46 / tmp47
tmp49 = tmp13 - tmp48
tmp50 = tmp49 * tmp49
tmp51 = tmp23 - tmp48
tmp52 = tmp51 * tmp51
tmp53 = tmp50 + tmp52
tmp54 = tmp34 - tmp48
tmp55 = tmp54 * tmp54
tmp56 = tmp53 + tmp55
tmp57 = tmp45 - tmp48
tmp58 = tmp57 * tmp57
tmp59 = tmp56 + tmp58
tmp60 = tmp59 / tmp47
tl.store(out_ptr0 + x0, tmp48, xmask)
tl.store(out_ptr1 + x0, tmp60, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_pow_tanh_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp14 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tmp15 = tmp13 - tmp14
tmp17 = tmp16 + tmp11
tmp18 = libdevice.rsqrt(tmp17)
tmp19 = tmp15 * tmp18
tmp21 = tmp19 * tmp20
tmp23 = tmp21 + tmp22
tl.store(out_ptr0 + x2, tmp23, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_native_layer_norm_pow_tanh_0[grid(64)](buf0,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_pow_tanh_1[grid(256)](buf0,
buf1, buf2, primals_4, primals_5, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
del buf2
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class IBertLMHeadNew(nn.Module):
"""I-BERT Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def _tie_weights(self):
self.bias = self.decoder.bias
def forward(self, input_0):
primals_2 = self.bias
primals_1 = self.dense.weight
primals_4 = self.dense.bias
primals_5 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_6 = self.decoder.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
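# Hedged smoke-test sketch for the compiled head above. A CUDA device is
# required, and `SimpleNamespace` stands in for the config object (an
# assumption). Note that layer_norm_eps=1 is baked into the fused kernel as
# the `+ 1.0` before rsqrt, so this wrapper is only valid for that eps.
def _ibert_lm_head_new_example():
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=4, layer_norm_eps=1, vocab_size=4)
    head = IBertLMHeadNew(cfg).cuda()
    logits = head(torch.rand(4, 4, 4, 4, device='cuda'))
    assert logits.shape == (4, 4, 4, 4)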
| Clemens123/transformers | IBertLMHead | false | 13,220 | ["Apache-2.0"] | 0 | 22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 | https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 |
BoundNeg | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/hk/chkwhavqfzukyktba7dvmj6ss52pfnxbzhqfb7gpkrye7sko7lrp.py
# Topologically Sorted Source Nodes: [neg], Original ATen: [aten.neg]
# Source node to ATen node mapping:
# neg => neg
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_neg_0 = async_compile.triton('triton_poi_fused_neg_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg], Original ATen: [aten.neg]
stream0 = get_raw_stream(0)
triton_poi_fused_neg_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import json
import logging
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
from torch.nn import MSELoss
# json/logger are used by PerturbationSynonym._load_synonyms and epsilon by
# Bound.get_bias but were missing from this excerpt; the epsilon value is an
# assumption. LinearBound, eyeC, Patches, and BoundList are defined elsewhere
# in the source library and are not reproduced here.
logger = logging.getLogger(__name__)
epsilon = 1e-12
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
assert self.eps.shape[0] == self.eps.shape[1
], 'Argument [eps] must form a n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
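# Hedged numeric sketch of the norm=inf branch of PerturbationLpNorm.
# concretize above: for x in [x - eps, x + eps], the worst case of A @ x is
# A @ center + sign * |A| @ diff. Values are illustrative.
def _linf_concretize_example():
    A = torch.tensor([[1.0, -2.0]])
    x = torch.tensor([0.5, 1.0])
    eps = 0.1
    center, diff = x, torch.full_like(x, eps)
    lower = A @ center - A.abs() @ diff
    upper = A @ center + A.abs() @ diff
    assert torch.all(lower <= upper)
    return lower, upper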
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed in NumPy 1.24
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
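# Hedged scalar sketch of the budget DP in PerturbationSynonym.concretize
# above: dp[i][j] is the best objective over the first i words using
# exactly j substitutions (cmp is max when sign == 1, as here). Values are
# illustrative.
def _synonym_dp_example():
    ax = [1.0, -2.0, 3.0]     # contribution of keeping word i unchanged
    ax_rep = [5.0, 0.0, 4.0]  # best contribution after substituting word i
    budget = 1
    dp = [[-1e+30] * (budget + 1) for _ in range(len(ax) + 1)]
    dp[0][0] = 0.0
    for i in range(1, len(ax) + 1):
        dp[i][0] = dp[i - 1][0] + ax[i - 1]
        for j in range(1, budget + 1):
            dp[i][j] = max(dp[i - 1][j] + ax[i - 1],
                dp[i - 1][j - 1] + ax_rep[i - 1])
    assert max(dp[len(ax)]) == 6.0  # substitute word 0: 5 - 2 + 3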
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
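# Hedged usage sketch: Interval behaves as a (lb, ub) tuple that also
# carries its Perturbation, which is what get_perturbation/is_perturbed
# inspect. Values are illustrative.
def _interval_example():
    ptb = PerturbationLpNorm(eps=0.1)
    itv = Interval(torch.zeros(2), torch.ones(2), ptb)
    lb, ub = itv
    assert torch.all(lb <= ub)
    assert Interval.is_perturbed(itv)
    assert Interval.get_perturbation(itv) == (np.inf, 0.1)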
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
            raise NotImplementedError()
class BoundNeg(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def forward(self, x):
return -x
def bound_backward(self, last_lA, last_uA, x):
return [(-last_lA if last_lA is not None else None, -last_uA if
last_uA is not None else None)], 0, 0
def bound_forward(self, dim_in, x):
return LinearBound(-x.lw, -x.lb, -x.uw, -x.ub)
def interval_propagate(self, *v):
return -v[0][1], -v[0][0]
def infer_batch_dim(self, batch_size, *x):
return x[0]
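# Hedged sketch of interval propagation through negation: x in [l, u]
# implies -x in [-u, -l], which is exactly the swap-and-negate performed by
# BoundNeg.interval_propagate above. Values are illustrative.
def _bound_neg_interval_example():
    lb = torch.tensor([-1.0, 0.5])
    ub = torch.tensor([2.0, 3.0])
    neg_lb, neg_ub = -ub, -lb
    assert torch.all(neg_lb <= neg_ub)
    return neg_lb, neg_ub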
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4,
'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion
=MSELoss()), 'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_neg_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
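# --- Illustrative sketch (hypothetical helper): with eps = 1, concretize
# keeps only the single largest per-coordinate deviation. For A = [[2, -3]]
# at x = [1, 1] with sign = -1: center = 2 - 3 = -1, the sorted deviations
# are [2, 0], so the lower bound is -1 - 2 = -3.
def _demo_l0_concretize():
    ptb = PerturbationL0Norm(eps=1)
    A = torch.tensor([[[2.0, -3.0]]])  # (batch, out_dim, n)
    x = torch.tensor([[1.0, 1.0]])     # (batch, n)
    return ptb.concretize(x, A, sign=-1)  # tensor([[-3.]])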
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
            assert self.eps.shape[0] == self.eps.shape[1], 'Argument [eps] must form an n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
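# --- Illustrative sketch (hypothetical helper): the L-inf branch of
# concretize_matrix evaluates bound = A @ c + sign * |A| @ d, where c and d
# are the interval center and radius. Written out directly for x_L = x - eps
# and x_U = x + eps:
def _demo_linf_bound(A, x, eps, sign=-1):
    c = x.reshape(x.shape[0], -1, 1)  # interval center (here simply x)
    d = torch.full_like(c, eps)       # interval radius
    return (A.matmul(c) + sign * A.abs().matmul(d)).squeeze(-1)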
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
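    # Reading of the DP above (illustrative): dp[i][j] is the extreme value
    # over the first i words using at most j substitutions. Each word is
    # either kept (add Ax[:, i-1]) or replaced (spend one budget unit and add
    # the extreme candidate value Ax_rep_bound[:, i-1]):
    #   dp[i][j] = cmp(dp[i-1][j] + Ax[:, i-1],
    #                  dp[i-1][j-1] + Ax_rep_bound[:, i-1])
    # with cmp = max for upper bounds (sign == 1) and min for lower bounds.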
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed in NumPy 1.24
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
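# --- Illustrative sketch (hypothetical helper): an Interval behaves as a
# (lb, ub) tuple and optionally carries the Perturbation that produced it;
# plain tuples are treated as unperturbed by the static helpers above.
def _demo_interval():
    lb = ub = torch.zeros(2)
    iv = Interval(lb, ub, ptb=PerturbationLpNorm(eps=0.1))
    assert Interval.is_perturbed(iv)
    assert Interval.get_perturbation(iv) == (np.inf, 0.1)
    return iv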
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
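    # Shape walk-through (illustrative): broadcasting lb of shape (B, n) and
    # lw of shape (B, dim_in, n) to shape_res = (B, m, n) first unsqueezes to
    # (B, 1, n) and (B, dim_in, 1, n), then repeats along the new axis, so
    # lb/ub end up with shape (B, m, n) and lw/uw with (B, dim_in, m, n).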
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
            raise NotImplementedError()
class BoundNegNew(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def bound_backward(self, last_lA, last_uA, x):
return [(-last_lA if last_lA is not None else None, -last_uA if
last_uA is not None else None)], 0, 0
def bound_forward(self, dim_in, x):
return LinearBound(-x.lw, -x.lb, -x.uw, -x.ub)
def interval_propagate(self, *v):
return -v[0][1], -v[0][0]
def infer_batch_dim(self, batch_size, *x):
return x[0]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
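# --- Illustrative usage sketch (hypothetical; requires CUDA): the compiled
# module is used like any nn.Module -- only forward() is rerouted through
# call(), the Bound bookkeeping is unchanged.
def _demo_bound_neg_new():
    m = BoundNegNew('x', 'neg', 'neg', {}, [], 0, {}, 'cuda')
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(m(x), -x)
    return m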
| Mahoumaru/auto_LiRPA | BoundNeg | false | 13,221 | ["BSD-3-Clause"] | 0 | b03a6c36eb1b921726778359d6d2b94e0cd7e480 | https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480 |
BoundPow | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/rw/crw3qydkgjmifxnk2lsi7cnidtaocksbhyv6htmtnrfv2zqr2lbi.py
# Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow]
# Source node to ATen node mapping:
# pow_1 => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
triton_poi_fused_pow_0 = async_compile.triton('triton_poi_fused_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = libdevice.pow(tmp0, tmp1)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_pow_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
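# Note (illustrative): running this module directly benchmarks call() on
# random contiguous (4, 4, 4, 4) CUDA tensors via print_performance, timing
# `times` runs and repeating the measurement `repeat` times.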
| from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
from torch.nn import MSELoss
import json
import logging

logger = logging.getLogger(__name__)  # assumed stand-in for auto_LiRPA's module logger
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
            assert self.eps.shape[0] == self.eps.shape[1], 'Argument [eps] must form an n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed in NumPy 1.24
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
            raise NotImplementedError()
class BoundPow(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def forward(self, x, y):
return torch.pow(x, y)
def interval_propagate(self, *v):
assert not self.is_input_perturbed(1)
exp = v[1][0]
assert exp == int(exp)
exp = int(exp)
pl, pu = torch.pow(v[0][0], exp), torch.pow(v[0][1], exp)
if exp % 2 == 1:
return pl, pu
else:
pl, pu = torch.min(pl, pu), torch.max(pl, pu)
mask = 1 - ((v[0][0] < 0) * (v[0][1] > 0)).float()
return pl * mask, pu
def infer_batch_dim(self, batch_size, *x):
return x[0]
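# --- Illustrative check (hypothetical helper): for an even exponent, an
# interval straddling zero must get lower bound 0, which is what the mask in
# interval_propagate enforces. E.g. x in [-2, 1] gives x**2 in [0, 4], while
# the endpoint powers alone are {4, 1}.
def _demo_even_pow_interval():
    h_L, h_U = torch.tensor([-2.0]), torch.tensor([1.0])
    pl, pu = h_L ** 2, h_U ** 2                    # 4, 1
    pl, pu = torch.min(pl, pu), torch.max(pl, pu)  # 1, 4
    mask = 1 - ((h_L < 0) * (h_U > 0)).float()     # 0 where the sign changes
    assert (pl * mask).item() == 0.0 and pu.item() == 4.0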
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4,
'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion
=MSELoss()), 'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
import json
import logging

logger = logging.getLogger(__name__)  # assumed stand-in for auto_LiRPA's module logger
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = libdevice.pow(tmp0, tmp1)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
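# --- Illustrative check (hypothetical helper, requires CUDA): call() launches
# the kernel with (arg1_1, arg0_1), so the first element of args is the
# exponent and the second is the base, matching torch.pow(arg1_1, arg0_1) in
# the traced graph.
def _check_fused_pow():
    base = torch.rand(4, 4, 4, 4, device='cuda') + 0.5
    exp = torch.rand(4, 4, 4, 4, device='cuda')
    out, = call([exp.clone(), base.clone()])
    assert torch.allclose(out, torch.pow(base, exp))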
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
            assert self.eps.shape[0] == self.eps.shape[1], 'Argument [eps] must form an n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed in NumPy 1.24
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
    def infer_batch_dim(self, batch_size, *x):
        raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
        else:
            raise NotImplementedError()
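# Standalone sketch (illustrative): the base interval_propagate above is only
# sound for elementwise monotonically increasing ops, since it simply pushes
# both interval endpoints through forward(). BoundReluSketch is a hypothetical
# subclass used for demonstration; torch and the Bound class above are assumed.
class BoundReluSketch(Bound):
    def forward(self, x):
        return torch.relu(x)
_relu_op = BoundReluSketch('in', 'relu_sketch', 'relu_sketch')
_lb, _ub = _relu_op.interval_propagate(
    (torch.tensor([-2.0, 0.5]), torch.tensor([-1.0, 1.5])))
assert torch.equal(_lb, torch.tensor([0.0, 0.5]))
assert torch.equal(_ub, torch.tensor([0.0, 1.5]))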
class BoundPowNew(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def interval_propagate(self, *v):
assert not self.is_input_perturbed(1)
exp = v[1][0]
assert exp == int(exp)
exp = int(exp)
pl, pu = torch.pow(v[0][0], exp), torch.pow(v[0][1], exp)
if exp % 2 == 1:
return pl, pu
else:
pl, pu = torch.min(pl, pu), torch.max(pl, pu)
mask = 1 - ((v[0][0] < 0) * (v[0][1] > 0)).float()
return pl * mask, pu
def infer_batch_dim(self, batch_size, *x):
return x[0]
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
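# Standalone numeric check (illustrative) of the even-exponent rule in
# interval_propagate above: when the input interval straddles zero, the mask
# clamps the lower bound to 0 while the upper bound is max(l**p, u**p).
# Assumes torch is imported as elsewhere in this module.
_l, _u = torch.tensor([-2.0, 1.0]), torch.tensor([3.0, 2.0])
_pl, _pu = torch.min(_l ** 2, _u ** 2), torch.max(_l ** 2, _u ** 2)
_mask = 1 - ((_l < 0) * (_u > 0)).float()
_lower, _upper = _pl * _mask, _pu
_grid = torch.stack([(_l + (_u - _l) * t / 100.0) ** 2 for t in range(101)])
assert torch.all(_lower <= _grid.min(dim=0).values + 1e-6)
assert torch.all(_upper >= _grid.max(dim=0).values - 1e-6)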
| Mahoumaru/auto_LiRPA | BoundPow | false | 13,222 | [
"BSD-3-Clause"
]
| 0 | b03a6c36eb1b921726778359d6d2b94e0cd7e480 | https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480 |
BoundNot | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/qs/cqsrkz3gbpplm7lww4odi2i7zyfpi7judf4uepdd45jimhykbtq2.py
# Topologically Sorted Source Nodes: [logical_not], Original ATen: [aten.logical_not]
# Source node to ATen node mapping:
# logical_not => logical_not
# Graph fragment:
# %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_logical_not_0 = async_compile.triton('triton_poi_fused_logical_not_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_logical_not_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_logical_not_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = (tmp0 != 0)
tmp2 = tmp1 == 0
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [logical_not], Original ATen: [aten.logical_not]
stream0 = get_raw_stream(0)
triton_poi_fused_logical_not_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
from torch.nn import MSELoss
import json
import logging
# json and a logger are used by PerturbationSynonym._load_synonyms below but
# were missing from this extract; a stdlib logger stands in for the project's
# own logging setup.
logger = logging.getLogger(__name__)
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
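# Standalone sketch (illustrative) of the L0 concretization above, for the
# lower bound (sign = -1): with a budget of eps coordinates an adversary may
# overwrite, the worst case removes the top-eps per-coordinate contributions,
# where a positive A_j loses A_j * x_j (x_j -> 0) and a negative A_j loses
# A_j * x_j - A_j (x_j -> 1). Inputs are assumed to lie in [0, 1]; torch is
# assumed imported as elsewhere in this module.
_A = torch.tensor([[1.0, -2.0, 3.0]])
_x = torch.tensor([0.5, 0.5, 0.5])
_budget = 1
_orig = _A * _x
_diff = torch.where(_A >= 0, _orig, _orig - _A)
_drop = torch.sort(_diff, dim=-1, descending=True).values[:, :_budget].sum(-1)
_lower = _A @ _x - _drop
assert torch.allclose(_lower, torch.tensor([-0.5]))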
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
            assert self.eps.shape[0] == self.eps.shape[1], (
                'Argument [eps] must form an n by n square matrix.')
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
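# Standalone check (illustrative) of the norm == np.inf branch of concretize
# above: over the box [x - eps, x + eps], the extremes of A @ x are
# A @ center +/- |A| @ diff; verified here against the four box corners,
# which attain the extremes for a linear map. Assumes torch as imported above.
_A = torch.tensor([[1.0, -1.0], [2.0, 0.5]])
_x = torch.tensor([0.0, 0.0])
_eps = 0.1
_diff = torch.full_like(_x, _eps)
_lower = _A @ _x - _A.abs() @ _diff
_corners = torch.tensor([[s1 * _eps, s2 * _eps]
                         for s1 in (-1.0, 1.0) for s2 in (-1.0, 1.0)])
assert torch.allclose(_lower, (_corners @ _A.T).min(dim=0).values)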
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
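# Standalone sketch (illustrative) of the budgeted-substitution DP used in
# concretize above, with scalar per-word scores in place of the A-matrix
# products: dp[i][j] is the best objective over the first i words after j
# substitutions, choosing per word between its nominal score Ax[i-1] and its
# worst-case replacement score Ax_rep[i-1].
_Ax_sketch = [1.0, 2.0, 3.0]
_Ax_rep_sketch = [0.0, -1.0, 2.5]
_budget, _n = 1, len(_Ax_sketch)
_INF = float('inf')
_dp = [[_INF] * (_budget + 1) for _ in range(_n + 1)]
_dp[0][0] = 0.0
for _i in range(1, _n + 1):
    _dp[_i][0] = _dp[_i - 1][0] + _Ax_sketch[_i - 1]
    for _j in range(1, _budget + 1):
        _dp[_i][_j] = min(_dp[_i - 1][_j] + _Ax_sketch[_i - 1],
                          _dp[_i - 1][_j - 1] + _Ax_rep_sketch[_i - 1])
# The tightest lower bound substitutes word 2: 1.0 + (-1.0) + 3.0 == 3.0.
assert min(_dp[_n]) == 3.0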
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
    def infer_batch_dim(self, batch_size, *x):
        raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
        else:
            raise NotImplementedError()
class BoundNot(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def forward(self, x):
return x.logical_not()
def infer_batch_dim(self, batch_size, *x):
return x[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4,
'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion
=MSELoss()), 'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
import json
import logging
# json and a logger are used by PerturbationSynonym._load_synonyms below but
# were missing from this extract; a stdlib logger stands in for the project's
# own logging setup.
logger = logging.getLogger(__name__)
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_logical_not_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_logical_not_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
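# Illustrative sanity check (requires a CUDA device; runs at import time):
# the compiled kernel path above matches eager torch.logical_not on float
# input.
if torch.cuda.is_available():
    _inp = torch.randn(4, 4, 4, 4, device='cuda')
    assert torch.equal(call([_inp.clone()])[0], torch.logical_not(_inp))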
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
            assert self.eps.shape[0] == self.eps.shape[1], (
                'Argument [eps] must form an n by n square matrix.')
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
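# Standalone sketch (illustrative) of the norm != inf branch of concretize
# above: by Holder's inequality, the maximum of A @ delta over
# ||delta||_p <= eps is eps * ||A||_q per row, with q the dual norm. Checked
# for p = q = 2, where the maximizer is delta = eps * A / ||A||_2. Assumes
# torch as imported above.
_A = torch.tensor([[3.0, 4.0]])
_x = torch.tensor([1.0, 1.0])
_eps = 0.5
_upper = _A @ _x + _A.norm(2, dim=-1) * _eps
_delta = _eps * _A[0] / _A[0].norm()
assert torch.allclose(_upper, _A @ (_x + _delta))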
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
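# Standalone sketch (illustrative) of the padding scheme used in init()
# above: each position's replacement list is right-padded with zero vectors
# up to max_num_cand, with a parallel 0/1 mask marking the real slots.
# Assumes torch as imported above.
_dim_word, _max_num_cand = 2, 3
_reps = [[torch.ones(_dim_word)], []]
_zeros = torch.zeros(_dim_word)
_flat, _mask_sketch = [], []
for _r in _reps:
    _flat += _r + [_zeros] * (_max_num_cand - len(_r))
    _mask_sketch += [1] * len(_r) + [0] * (_max_num_cand - len(_r))
_x_rep_sketch = torch.cat(_flat).reshape(1, len(_reps), _max_num_cand,
    _dim_word)
assert _mask_sketch == [1, 0, 0, 0, 0, 0]
assert _x_rep_sketch.shape == (1, 2, 3, 2)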
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
    def infer_batch_dim(self, batch_size, *x):
        raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
        else:
            raise NotImplementedError()
class BoundNotNew(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def infer_batch_dim(self, batch_size, *x):
return x[0]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
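# Illustrative usage sketch (requires a CUDA device; runs at import time):
# BoundNotNew.forward dispatches to the compiled kernel above, so it agrees
# with eager logical_not.
if torch.cuda.is_available():
    _not_op = BoundNotNew('in', 'not', 'not', {}, [], 0, {}, 'cuda')
    _inp = torch.randn(4, 4, 4, 4, device='cuda')
    assert torch.equal(_not_op.forward(_inp.clone()), torch.logical_not(_inp))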
| Mahoumaru/auto_LiRPA | BoundNot | false | 13,223 | [
"BSD-3-Clause"
]
| 0 | b03a6c36eb1b921726778359d6d2b94e0cd7e480 | https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480 |
BoundSqrt | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/57/c57r2k62dzemkboglo2dvxlbmsaebzf7ocnhy3ligubspzciluam.py
# Topologically Sorted Source Nodes: [sqrt], Original ATen: [aten.sqrt]
# Source node to ATen node mapping:
# sqrt => sqrt
# Graph fragment:
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_sqrt_0 = async_compile.triton('triton_poi_fused_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.sqrt(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sqrt], Original ATen: [aten.sqrt]
stream0 = get_raw_stream(0)
triton_poi_fused_sqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
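# Illustrative sanity check (requires a CUDA device; runs at import time):
# the compiled sqrt kernel above matches eager torch.sqrt on nonnegative
# input.
if torch.cuda.is_available():
    _inp = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(call([_inp.clone()])[0], torch.sqrt(_inp))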
| from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
from torch.nn import MSELoss
import json
import logging
# json and a logger are used by PerturbationSynonym._load_synonyms below but
# were missing from this extract; a stdlib logger stands in for the project's
# own logging setup.
logger = logging.getLogger(__name__)
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
            assert self.eps.shape[0] == self.eps.shape[1], (
                'Argument [eps] must form an n by n square matrix.')
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
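# Worked example (added as a hedged sketch, matching concretize_matrix above).
# For norm=inf with a plain tensor A, the bound is interval arithmetic:
#     bound = A @ center + sign * |A| @ diff,
#     center = (x_U + x_L) / 2,  diff = (x_U - x_L) / 2
#   ptb = PerturbationLpNorm(eps=0.1)
#   x = torch.rand(2, 5)
#   A = torch.rand(2, 3, 5)
#   lb = ptb.concretize(x, A, sign=-1)   # (2, 3) worst-case lower bounds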
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
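    # Note on the dynamic program above (descriptive comment, added): dp[i][j]
    # holds the worst case (max if sign == 1, min otherwise) of the partial
    # sum over the first i positions with exactly j words substituted; each
    # step either keeps word i (dp[i-1][j] + Ax) or substitutes it
    # (dp[i-1][j-1] + Ax_rep_bound). The final cmp over j <= budget yields
    # the bound under at most `budget` substitutions.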
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
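# Hedged usage sketch (added): an Interval behaves like the tuple (lb, ub)
# but carries its Perturbation, so downstream code can recover norm and eps.
#   lb = ub = torch.zeros(2, 3)
#   itv = Interval(lb, ub, PerturbationLpNorm(eps=0.1))
#   Interval.get_perturbation(itv)    # -> (inf, 0.1)
#   Interval.is_perturbed((lb, ub))   # plain tuples are treated as perturbed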
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
    def infer_batch_dim(self, batch_size, *x):
        raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
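    # Descriptive note (added): broadcast_backward reverses forward-pass
    # broadcasting by summing A over the extra leading dims and over dims
    # where x had size 1, so that A.shape[1:] matches x's original shape.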
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
            raise NotImplementedError()
class BoundSqrt(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def forward(self, x):
return torch.sqrt(x)
def infer_batch_dim(self, batch_size, *x):
return x[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4,
'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion
=MSELoss()), 'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
import json
import logging
logger = logging.getLogger(__name__)  # assumed logger; not defined in the extracted snippet
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.sqrt(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
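# Hedged usage sketch for the compiled graph above (assumes a CUDA device):
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   out, = call([x])                 # note: call() clears the list it is given
#   assert torch.allclose(out, x.sqrt())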
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
assert self.eps.shape[0] == self.eps.shape[1
], 'Argument [eps] must form a n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
    def infer_batch_dim(self, batch_size, *x):
        raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
            raise NotImplementedError()
class BoundSqrtNew(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def infer_batch_dim(self, batch_size, *x):
return x[0]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
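# Descriptive note (added): BoundSqrtNew is the Triton-backed variant of
# BoundSqrt; its forward routes through call() and the fused sqrt kernel
# instead of calling torch.sqrt eagerly.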
| Mahoumaru/auto_LiRPA | BoundSqrt | false | 13,224 | [
"BSD-3-Clause"
]
| 0 | b03a6c36eb1b921726778359d6d2b94e0cd7e480 | https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480 |
CanineAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
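# Descriptive note (added): triton_poi_fused_1 computes the numerically stable
# softmax numerator exp(x - max(x)) over each length-4 attention row; the
# normalization happens in triton_poi_fused_2 below.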
# kernel path: runs/run_shard_9/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
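# Descriptive note (added): triton_poi_fused_2 finishes the softmax by dividing
# each exp value by its row sum, and writes 0 for rows whose logits are all
# -inf (fully masked attention rows), matching the where_self node above.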
# kernel path: runs/run_shard_9/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py
# Topologically Sorted Source Nodes: [add, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# hidden_states_2 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
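# Descriptive note (added): this kernel fuses the residual add with the first
# half of LayerNorm, emitting the per-row mean (tmp16) and biased variance
# (tmp28) over the 4-element hidden dimension.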
# kernel path: runs/run_shard_9/inductor_cache/l3/cl3bnd5pv2p4ydfmlj74bv4mbiwr2ntrdvbubnjubetyhosmxag6.py
# Topologically Sorted Source Nodes: [add, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# hidden_states_2 => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_10), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_11), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
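# A minimal eager-mode sketch (not part of the generated module) of the
# normalization the fused kernel above applies with the statistics produced by
# triton_poi_fused_add_native_layer_norm_5. Note the hardcoded eps of 1.0,
# matching this module's layer_norm_eps. Argument names are illustrative only.
def _layer_norm_apply_sketch(dense_out, residual, mean, var, weight, bias):
    x = dense_out + residual
    return (x - mean) * (var + 1.0).rsqrt() * weight + bias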
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_9
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(buf11, primals_3, buf12, buf13, 16, grid=grid(16), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(buf11, primals_3, buf12, buf13, primals_10, primals_11, buf14, 64, grid=grid(64), stream=stream0)
del buf12
del buf13
del primals_11
return (buf14, primals_3, primals_10, buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
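# Minimal usage sketch (hypothetical values, not executed here): pruning head 1
# of a 4-head model with head_size 4 and nothing pruned yet:
#   heads, index = find_pruneable_heads_and_indices([1], n_heads=4,
#       head_size=4, already_pruned_heads=set())
#   heads == {1}; index holds the 12 flat positions kept for heads 0, 2 and 3.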
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
    index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
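# Minimal usage sketch (hypothetical values, not executed here): keeping only
# output rows 0, 2 and 3 of an nn.Linear(4, 4) yields an nn.Linear(4, 3):
#   pruned = prune_linear_layer(nn.Linear(4, 4), torch.tensor([0, 2, 3]), dim=0)
#   pruned.weight.shape == torch.Size([3, 4])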
class CanineSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config,
'position_embedding_type', 'absolute')
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.
max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, from_tensor, to_tensor, attention_mask=None,
head_mask=None, output_attentions=False):
mixed_query_layer = self.query(from_tensor)
key_layer = self.transpose_for_scores(self.key(to_tensor))
value_layer = self.transpose_for_scores(self.value(to_tensor))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
seq_length = from_tensor.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long,
device=from_tensor.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long,
device=from_tensor.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.
max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr',
key_layer, positional_embedding)
attention_scores = (attention_scores +
relative_position_scores_query +
relative_position_scores_key)
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if attention_mask is not None:
if attention_mask.ndim == 3:
attention_mask = torch.unsqueeze(attention_mask, dim=1)
attention_mask = (1.0 - attention_mask.float()) * -10000.0
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
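# Shape sketch for the attention above (a hedged summary; B = batch, S = seq
# length, n = num_attention_heads, d = attention_head_size, H = n * d):
#   query/key/value after transpose_for_scores: (B, n, S, d)
#   attention_scores = Q @ K^T / sqrt(d): (B, n, S, S)
#   context_layer = softmax(attention_scores) @ V, reshaped back to (B, S, H)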
class CanineSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
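# I.e. CanineSelfOutput computes LayerNorm(dropout(dense(hidden_states)) +
# input_tensor), the usual post-attention residual-plus-layer-norm block.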
class CanineAttention(nn.Module):
"""
Additional arguments related to local attention:
- **local** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether to apply local attention.
- **always_attend_to_first_position** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Should all blocks
be able to attend
to the :obj:`to_tensor`'s first position (e.g. a [CLS] position)? - **first_position_attends_to_all**
(:obj:`bool`, `optional`, defaults to :obj:`False`) -- Should the `from_tensor`'s first position be able to
attend to all positions within the `from_tensor`? - **attend_from_chunk_width** (:obj:`int`, `optional`,
defaults to 128) -- The width of each block-wise chunk in :obj:`from_tensor`. - **attend_from_chunk_stride**
(:obj:`int`, `optional`, defaults to 128) -- The number of elements to skip when moving to the next block in
:obj:`from_tensor`. - **attend_to_chunk_width** (:obj:`int`, `optional`, defaults to 128) -- The width of each
block-wise chunk in `to_tensor`. - **attend_to_chunk_stride** (:obj:`int`, `optional`, defaults to 128) -- The
number of elements to skip when moving to the next block in :obj:`to_tensor`.
"""
def __init__(self, config, local=False, always_attend_to_first_position:
'bool'=False, first_position_attends_to_all: 'bool'=False,
attend_from_chunk_width: 'int'=128, attend_from_chunk_stride: 'int'
=128, attend_to_chunk_width: 'int'=128, attend_to_chunk_stride:
'int'=128):
super().__init__()
self.self = CanineSelfAttention(config)
self.output = CanineSelfOutput(config)
self.pruned_heads = set()
self.local = local
if attend_from_chunk_width < attend_from_chunk_stride:
raise ValueError(
                '`attend_from_chunk_width` < `attend_from_chunk_stride` would cause sequence positions to get skipped.'
)
if attend_to_chunk_width < attend_to_chunk_stride:
raise ValueError(
                '`attend_to_chunk_width` < `attend_to_chunk_stride` would cause sequence positions to get skipped.'
)
self.always_attend_to_first_position = always_attend_to_first_position
self.first_position_attends_to_all = first_position_attends_to_all
self.attend_from_chunk_width = attend_from_chunk_width
self.attend_from_chunk_stride = attend_from_chunk_stride
self.attend_to_chunk_width = attend_to_chunk_width
self.attend_to_chunk_stride = attend_to_chunk_stride
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.
num_attention_heads, self.self.attention_head_size, self.
pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(
heads)
self.self.all_head_size = (self.self.attention_head_size * self.
self.num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, head_mask=None,
output_attentions=False):
if not self.local:
self_outputs = self.self(hidden_states, hidden_states,
attention_mask, head_mask, output_attentions)
attention_output = self_outputs[0]
else:
from_seq_length = to_seq_length = hidden_states.shape[1]
from_tensor = to_tensor = hidden_states
from_chunks = []
if self.first_position_attends_to_all:
from_chunks.append((0, 1))
from_start = 1
else:
from_start = 0
for chunk_start in range(from_start, from_seq_length, self.
attend_from_chunk_stride):
chunk_end = min(from_seq_length, chunk_start + self.
attend_from_chunk_width)
from_chunks.append((chunk_start, chunk_end))
to_chunks = []
if self.first_position_attends_to_all:
to_chunks.append((0, to_seq_length))
for chunk_start in range(0, to_seq_length, self.
attend_to_chunk_stride):
chunk_end = min(to_seq_length, chunk_start + self.
attend_to_chunk_width)
to_chunks.append((chunk_start, chunk_end))
if len(from_chunks) != len(to_chunks):
raise ValueError(
                    f'Expected to have same number of `from_chunks` ({from_chunks}) and `to_chunks` ({to_chunks}). Check strides.'
)
attention_output_chunks = []
attention_probs_chunks = []
for (from_start, from_end), (to_start, to_end) in zip(from_chunks,
to_chunks):
from_tensor_chunk = from_tensor[:, from_start:from_end, :]
to_tensor_chunk = to_tensor[:, to_start:to_end, :]
attention_mask_chunk = attention_mask[:, from_start:
from_end, to_start:to_end]
if self.always_attend_to_first_position:
cls_attention_mask = attention_mask[:, from_start:
from_end, 0:1]
attention_mask_chunk = torch.cat([cls_attention_mask,
attention_mask_chunk], dim=2)
cls_position = to_tensor[:, 0:1, :]
to_tensor_chunk = torch.cat([cls_position,
to_tensor_chunk], dim=1)
attention_outputs_chunk = self.self(from_tensor_chunk,
to_tensor_chunk, attention_mask_chunk, head_mask,
output_attentions)
attention_output_chunks.append(attention_outputs_chunk[0])
if output_attentions:
attention_probs_chunks.append(attention_outputs_chunk[1])
attention_output = torch.cat(attention_output_chunks, dim=1)
attention_output = self.output(attention_output, hidden_states)
outputs = attention_output,
if not self.local:
outputs = outputs + self_outputs[1:]
else:
outputs = outputs + tuple(attention_probs_chunks)
return outputs
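# Chunking sketch for the local branch above (hypothetical values): with
# seq_length 256 and the default width = stride = 128,
#   from_chunks == to_chunks == [(0, 128), (128, 256)]
# so each query block attends only to the key/value block at the same offset
# (plus the first position when always_attend_to_first_position is set).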
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, position_embedding_type=4,
layer_norm_eps=1, hidden_dropout_prob=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_9
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_3,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_3,
buf12, buf13, primals_10, primals_11, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_11
return buf14, primals_3, primals_10, buf7, reinterpret_tensor(buf8, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_8
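# Note: buf14 (the layer-normed attention output) is the module's result; the
# remaining returned tensors appear to be inputs and intermediates that
# inductor saves for the backward pass.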
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
    index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
class CanineSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config,
'position_embedding_type', 'absolute')
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.
max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, from_tensor, to_tensor, attention_mask=None,
head_mask=None, output_attentions=False):
mixed_query_layer = self.query(from_tensor)
key_layer = self.transpose_for_scores(self.key(to_tensor))
value_layer = self.transpose_for_scores(self.value(to_tensor))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
seq_length = from_tensor.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long,
device=from_tensor.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long,
device=from_tensor.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.
max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr',
key_layer, positional_embedding)
attention_scores = (attention_scores +
relative_position_scores_query +
relative_position_scores_key)
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if attention_mask is not None:
if attention_mask.ndim == 3:
attention_mask = torch.unsqueeze(attention_mask, dim=1)
attention_mask = (1.0 - attention_mask.float()) * -10000.0
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
class CanineSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class CanineAttentionNew(nn.Module):
"""
Additional arguments related to local attention:
- **local** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether to apply local attention.
- **always_attend_to_first_position** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Should all blocks
be able to attend
to the :obj:`to_tensor`'s first position (e.g. a [CLS] position)? - **first_position_attends_to_all**
(:obj:`bool`, `optional`, defaults to :obj:`False`) -- Should the `from_tensor`'s first position be able to
attend to all positions within the `from_tensor`? - **attend_from_chunk_width** (:obj:`int`, `optional`,
defaults to 128) -- The width of each block-wise chunk in :obj:`from_tensor`. - **attend_from_chunk_stride**
(:obj:`int`, `optional`, defaults to 128) -- The number of elements to skip when moving to the next block in
:obj:`from_tensor`. - **attend_to_chunk_width** (:obj:`int`, `optional`, defaults to 128) -- The width of each
block-wise chunk in `to_tensor`. - **attend_to_chunk_stride** (:obj:`int`, `optional`, defaults to 128) -- The
number of elements to skip when moving to the next block in :obj:`to_tensor`.
"""
def __init__(self, config, local=False, always_attend_to_first_position:
'bool'=False, first_position_attends_to_all: 'bool'=False,
attend_from_chunk_width: 'int'=128, attend_from_chunk_stride: 'int'
=128, attend_to_chunk_width: 'int'=128, attend_to_chunk_stride:
'int'=128):
super().__init__()
self.self = CanineSelfAttention(config)
self.output = CanineSelfOutput(config)
self.pruned_heads = set()
self.local = local
if attend_from_chunk_width < attend_from_chunk_stride:
raise ValueError(
                '`attend_from_chunk_width` < `attend_from_chunk_stride` would cause sequence positions to get skipped.'
)
if attend_to_chunk_width < attend_to_chunk_stride:
raise ValueError(
                '`attend_to_chunk_width` < `attend_to_chunk_stride` would cause sequence positions to get skipped.'
)
self.always_attend_to_first_position = always_attend_to_first_position
self.first_position_attends_to_all = first_position_attends_to_all
self.attend_from_chunk_width = attend_from_chunk_width
self.attend_from_chunk_stride = attend_from_chunk_stride
self.attend_to_chunk_width = attend_to_chunk_width
self.attend_to_chunk_stride = attend_to_chunk_stride
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.
num_attention_heads, self.self.attention_head_size, self.
pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(
heads)
self.self.all_head_size = (self.self.attention_head_size * self.
self.num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_0):
primals_1 = self.self.query.weight
primals_2 = self.self.query.bias
primals_4 = self.self.key.weight
primals_5 = self.self.key.bias
primals_6 = self.self.value.weight
primals_7 = self.self.value.bias
primals_8 = self.output.dense.weight
primals_9 = self.output.dense.bias
primals_10 = self.output.LayerNorm.weight
primals_11 = self.output.LayerNorm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| Clemens123/transformers | CanineAttention | false | 13,225 | [
"Apache-2.0"
]
| 0 | 22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 | https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 |
AlbertAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
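# A minimal eager-mode sketch (not part of the generated module) of the kernel
# above: the first half of a numerically stable softmax over the last dim,
# subtracting the row-wise max before exponentiating so large scores cannot
# overflow. The argument name is illustrative only.
def _softmax_exp_sketch(scores):
    return (scores - scores.amax(dim=-1, keepdim=True)).exp()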
# kernel path: runs/run_shard_9/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
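# A minimal eager-mode sketch (not part of the generated module) of the kernel
# above: the second half of the masked softmax. It divides the exponentials by
# their row sum and zeroes rows whose scores were all -inf, which would
# otherwise yield NaN from 0/0. Argument names are illustrative only.
def _softmax_norm_sketch(scores, exps):
    probs = exps / exps.sum(dim=-1, keepdim=True)
    fully_masked = (scores == float('-inf')).all(dim=-1, keepdim=True)
    return probs.masked_fill(fully_masked, 0.0)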
# kernel path: runs/run_shard_9/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [projected_context_layer], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# projected_context_layer => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_15,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/zq/czqeiybdb6mlnwo4hmrayt3c44g7hbps2ftgdd7x2mv3sr2mwjbn.py
# Topologically Sorted Source Nodes: [projected_context_layer, add, layernormed_context_layer], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add_1
# layernormed_context_layer => var_mean
# projected_context_layer => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_9), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_1, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
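# A hedged eager-mode reference for the statistics kernel above (a sketch, not
# generated code): it computes the per-row mean and biased variance of the
# residual sum hidden + (projected + dense_bias), matching
# var_mean(..., correction=0) over the last dimension of size 4.
def _layer_norm_stats_reference(hidden, projected, dense_bias):
    s = hidden + projected + dense_bias
    mean = s.mean(dim=-1, keepdim=True)
    var = s.var(dim=-1, unbiased=False, keepdim=True)
    return mean, var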
# kernel path: runs/run_shard_9/inductor_cache/v3/cv3tynim3vywiualr2ksfo6o4q7dligi2wlt2nm2akwhqfizltjs.py
# Topologically Sorted Source Nodes: [projected_context_layer, add, layernormed_context_layer], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add_1
# layernormed_context_layer => add_2, add_3, mul, mul_1, rsqrt, sub_1
# projected_context_layer => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_9), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_10), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_11), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
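# A hedged eager-mode reference for the normalization kernel above (a sketch,
# not generated code). Note that the epsilon is the literal 1.0 baked in by the
# compiler, because the test config below uses layer_norm_eps=1.
def _layer_norm_apply_reference(hidden, projected, dense_bias, mean, var, gamma, beta):
    s = hidden + projected + dense_bias
    return (s - mean) * torch.rsqrt(var + 1.0) * gamma + beta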
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [projected_context_layer], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [projected_context_layer], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [projected_context_layer, add, layernormed_context_layer], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_3, buf11, primals_9, buf12, buf13, 16, grid=grid(16), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [projected_context_layer, add, layernormed_context_layer], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_3, buf11, primals_9, buf12, buf13, primals_10, primals_11, buf14, 64, grid=grid(64), stream=stream0)
del buf12
del buf13
del primals_11
return (buf14, primals_3, primals_9, primals_10, buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
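def _find_pruneable_heads_example():
    # Hedged usage sketch (illustrative values, not part of the original file):
    # prune head 1 of 4 heads of size 2, with nothing pruned previously.
    heads, index = find_pruneable_heads_and_indices(
        [1], n_heads=4, head_size=2, already_pruned_heads=set())
    assert heads == {1}
    assert index.tolist() == [0, 1, 4, 5, 6, 7]  # rows kept for heads 0, 2, 3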
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
    index = index.to(layer.weight.device)  # keep the indices on the same device as the weights
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
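def _prune_linear_layer_example():
    # Hedged usage sketch (illustrative shapes, not part of the original file):
    # keep 6 of the 8 output rows of a Linear(4, 8), as when removing one of
    # four heads of size 2.
    layer = nn.Linear(4, 8)
    index = torch.tensor([0, 1, 4, 5, 6, 7])
    pruned = prune_linear_layer(layer, index, dim=0)
    assert pruned.weight.shape == (6, 4) and pruned.bias.shape == (6,)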
class AlbertAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
                f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = (config.hidden_size // config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob
)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.pruned_heads = set()
self.position_embedding_type = getattr(config,
'position_embedding_type', 'absolute')
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.
max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.
num_attention_heads, self.attention_head_size, self.pruned_heads)
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = (self.attention_head_size * self.
num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, head_mask=None,
output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long,
device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long,
device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.
max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility (restores the cast dropped from the upstream code)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr',
key_layer, positional_embedding)
attention_scores = (attention_scores +
relative_position_scores_query +
relative_position_scores_key)
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.attention_dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(2, 1).flatten(2)
projected_context_layer = self.dense(context_layer)
projected_context_layer_dropout = self.output_dropout(
projected_context_layer)
layernormed_context_layer = self.LayerNorm(hidden_states +
projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs
) if output_attentions else (layernormed_context_layer,)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5,
layer_norm_eps=1, position_embedding_type=4)}]
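def _albert_attention_smoke_test():
    # Hedged smoke test (a sketch, not part of the benchmark harness): run the
    # eager module on the toy shapes the generated kernels assume; eval()
    # disables both dropouts so the output is deterministic.
    cfg = get_init_inputs()[1]['config']
    module = AlbertAttention(cfg).eval()
    out, = module(*get_inputs())
    assert out.shape == (4, 4, 4)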
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
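# The two kernels above implement a numerically stable softmax split across two
# passes: triton_poi_fused_1 computes exp(x - rowmax), and triton_poi_fused_2
# divides by the row sum while zeroing any row that was entirely -inf (a fully
# masked attention row). A hedged eager-mode reference (a sketch, not generated
# code):
def _masked_softmax_reference(scores):
    num = torch.exp(scores - scores.max(dim=-1, keepdim=True).values)
    probs = num / num.sum(dim=-1, keepdim=True)
    all_masked = (scores == float('-inf')).all(dim=-1, keepdim=True)
    return probs.masked_fill(all_masked, 0.0)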
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf11,
primals_9, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_3, buf11,
primals_9, buf12, buf13, primals_10, primals_11, buf14, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf12
del buf13
del primals_11
return buf14, primals_3, primals_9, primals_10, buf7, reinterpret_tensor(
buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4
), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_8
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
    index = index.to(layer.weight.device)  # keep the indices on the same device as the weights
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
class AlbertAttentionNew(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
                f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = (config.hidden_size // config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob
)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.pruned_heads = set()
self.position_embedding_type = getattr(config,
'position_embedding_type', 'absolute')
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.
max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.
num_attention_heads, self.attention_head_size, self.pruned_heads)
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = (self.attention_head_size * self.
num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_0):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_8 = self.dense.weight
primals_9 = self.dense.bias
primals_10 = self.LayerNorm.weight
primals_11 = self.LayerNorm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
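def _check_against_eager(module_new, module_ref, x):
    # Hedged equivalence sketch (an assumption, not part of the generated
    # file): given an AlbertAttentionNew and a weight-sharing eager
    # AlbertAttention in eval mode (dropout off), the fused graph should
    # reproduce the eager output. Both modules and x must live on cuda:0,
    # since the compiled path is CUDA-only.
    module_ref.load_state_dict(module_new.state_dict())
    out_ref, = module_ref.eval()(x)
    out_new = module_new(x)
    torch.testing.assert_close(out_new, out_ref)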
| Clemens123/transformers | AlbertAttention | false | 13,226 | ["Apache-2.0"] | 0 | 22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 | https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26 |
BoundReciprocal | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/yb/cybxmqgstg473ic3ozmef5imn5esyxvm3ttfpkjco3dcshvnl2bq.py
# Topologically Sorted Source Nodes: [reciprocal], Original ATen: [aten.reciprocal]
# Source node to ATen node mapping:
# reciprocal => reciprocal
# Graph fragment:
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_reciprocal_0 = async_compile.triton('triton_poi_fused_reciprocal_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reciprocal_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp1 / tmp0
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [reciprocal], Original ATen: [aten.reciprocal]
stream0 = get_raw_stream(0)
triton_poi_fused_reciprocal_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
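def _reciprocal_check():
    # Hedged verification sketch (not part of the benchmark harness): the
    # compiled graph should match torch.reciprocal elementwise. Inputs are
    # shifted away from zero to avoid infinities.
    x = torch.rand(4, 4, 4, 4, device='cuda:0') + 0.5
    y, = call([x.clone()])
    torch.testing.assert_close(y, torch.reciprocal(x))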
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
from torch.nn import MSELoss
import json
import logging
logger = logging.getLogger(__name__)  # _load_synonyms below logs through this
# NOTE: LinearBound, eyeC, Patches, BoundList and epsilon are referenced below
# but defined elsewhere in auto_LiRPA; they are not part of this extract.
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
assert self.eps.shape[0] == self.eps.shape[1
                ], 'Argument [eps] must form an n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
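def _linf_concretize_example():
    # Hedged worked example (illustrative values) of the norm=inf branch of
    # concretize_matrix above: the lower bound is A @ center - eps * |A| @ 1.
    ptb = PerturbationLpNorm(eps=0.1)
    x = torch.zeros(1, 3)                # center of the input box
    A = torch.randn(1, 2, 3)             # (batch, out_dim, in_dim)
    lb = ptb.concretize(x, A, sign=-1)
    expected = -0.1 * A.abs().sum(-1)    # A @ 0 - 0.1 * row-sums of |A|
    torch.testing.assert_close(lb, expected)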
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed in NumPy 1.24
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
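def _interval_example():
    # Hedged usage sketch: an Interval is an (lb, ub) tuple that carries its
    # perturbation, and get_perturbation recovers (norm, eps) from it.
    ptb = PerturbationLpNorm(eps=0.1)
    iv = Interval(torch.zeros(2), torch.ones(2), ptb=ptb)
    _lb, _ub = iv
    assert Interval.get_perturbation(iv) == (np.inf, 0.1)
    assert Interval.is_perturbed(iv)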
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
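# Reverse the effect of forward broadcasting: sum the coefficient
# tensor A over dimensions that were expanded so that its shape
# (minus the leading output dim) matches the unbroadcast input x.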
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
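# Propagate the constant term through the linear coefficients: computes
# A @ bias for dense tensors and the patch analogue for Patches.
# `epsilon` is assumed to be a small module-level zero threshold
# (1e-12 in upstream auto_LiRPA), defined outside this excerpt.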
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
raise NotImplementedError()
class BoundActivation(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
self.relaxed = False
def _init_linear(self, x):
self.mask_pos = torch.gt(x.lower, 0).to(x.lower.dtype)
self.mask_neg = torch.lt(x.upper, 0).to(x.upper.dtype)
# the casts above are needed because `1 - mask` is unsupported on bool tensors
self.mask_both = 1 - self.mask_pos - self.mask_neg
self.lw = torch.zeros(x.lower.shape, device=self.device)
self.lb = self.lw.clone()
self.uw = self.lw.clone()
self.ub = self.lw.clone()
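# Accumulate the linear relaxation y = k * (x - x0) + y0 into the lower
# or upper (weight, bias) pair, optionally masked per element.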
def _add_linear(self, mask, type, k, x0, y0):
if mask is None:
mask = 1
if type == 'lower':
w_out, b_out = self.lw, self.lb
else:
w_out, b_out = self.uw, self.ub
w_out += mask * k
b_out += mask * (-x0 * k + y0)
def bound_relax(self, x):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA, x):
if not self.relaxed:
self._init_linear(x)
self.bound_relax(x)
def _bound_oneside(last_A, sign=-1):
if last_A is None:
return None, 0
if self.batch_dim == 0:
if sign == -1:
_A = last_A.clamp(min=0) * self.lw.unsqueeze(0
) + last_A.clamp(max=0) * self.uw.unsqueeze(0)
_bias = last_A.clamp(min=0) * self.lb.unsqueeze(0
) + last_A.clamp(max=0) * self.ub.unsqueeze(0)
elif sign == 1:
_A = last_A.clamp(min=0) * self.uw.unsqueeze(0
) + last_A.clamp(max=0) * self.lw.unsqueeze(0)
_bias = last_A.clamp(min=0) * self.ub.unsqueeze(0
) + last_A.clamp(max=0) * self.lb.unsqueeze(0)
while _bias.ndim > 2:
_bias = torch.sum(_bias, dim=-1)
elif self.batch_dim == -1:
mask = torch.gt(last_A, 0.0).to(last_A.dtype)
if sign == -1:
_A = last_A * (mask * self.lw.unsqueeze(0).unsqueeze(1) +
(1 - mask) * self.uw.unsqueeze(0).unsqueeze(1))
_bias = last_A * (mask * self.lb.unsqueeze(0).unsqueeze
(1) + (1 - mask) * self.ub.unsqueeze(0).unsqueeze(1))
elif sign == 1:
_A = last_A * (mask * self.uw.unsqueeze(0).unsqueeze(1) +
(1 - mask) * self.lw.unsqueeze(0).unsqueeze(1))
_bias = last_A * (mask * self.ub.unsqueeze(0).unsqueeze
(1) + (1 - mask) * self.lb.unsqueeze(0).unsqueeze(1))
while _bias.ndim > 2:
_bias = torch.sum(_bias, dim=-1)
else:
raise NotImplementedError
return _A, _bias
lA, lbias = _bound_oneside(last_lA, sign=-1)
uA, ubias = _bound_oneside(last_uA, sign=+1)
return [(lA, uA)], lbias, ubias
def bound_forward(self, dim_in, x):
if not self.relaxed:
self._init_linear(x)
self.bound_relax(x)
if self.lw.ndim > 0:
if x.lw is not None:
lw = self.lw.unsqueeze(1).clamp(min=0
) * x.lw + self.lw.unsqueeze(1).clamp(max=0) * x.uw
uw = self.uw.unsqueeze(1).clamp(max=0
) * x.lw + self.uw.unsqueeze(1).clamp(min=0) * x.uw
else:
lw = uw = None
elif x.lw is not None:
lw = self.lw.unsqueeze(0).clamp(min=0) * x.lw + self.lw.unsqueeze(0
).clamp(max=0) * x.uw
uw = self.uw.unsqueeze(0).clamp(min=0) * x.lw + self.uw.unsqueeze(0
).clamp(max=0) * x.uw
else:
lw = uw = None
lb = self.lw.clamp(min=0) * x.lb + self.lw.clamp(max=0
) * x.ub + self.lb
ub = self.uw.clamp(max=0) * x.lb + self.uw.clamp(min=0
) * x.ub + self.ub
return LinearBound(lw, lb, uw, ub)
def infer_batch_dim(self, batch_size, *x):
return x[0]
class BoundReciprocal(BoundActivation):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def forward(self, x):
return torch.reciprocal(x)
def bound_relax(self, x):
m = (x.lower + x.upper) / 2
kl = -1 / m.pow(2)
self._add_linear(mask=None, type='lower', k=kl, x0=m, y0=1.0 / m)
ku = -1.0 / (x.lower * x.upper)
self._add_linear(mask=None, type='upper', k=ku, x0=x.lower, y0=1.0 /
x.lower)
def interval_propagate(self, *v):
h_L, h_U = v[0]
return torch.reciprocal(h_U.float()), torch.reciprocal(h_L.float())
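# Added sanity sketch (not from the original source): on x > 0 the
# relaxation above lower-bounds the convex function 1/x by its tangent
# at the interval midpoint and upper-bounds it by the chord through the
# endpoints. Define-only numeric check:
def _check_reciprocal_relaxation(l=0.5, u=2.0, n=101):
    xs = torch.linspace(l, u, n)
    m = (l + u) / 2
    lower = -(xs - m) / m ** 2 + 1 / m  # tangent of 1/x at the midpoint
    upper = -(xs - l) / (l * u) + 1 / l  # chord through the endpoints
    assert bool((lower <= 1 / xs + 1e-6).all())
    assert bool((1 / xs <= upper + 1e-6).all())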
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4,
'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion
=MSELoss()), 'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
import json
import logging
logger = logging.getLogger(__name__)
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
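# Elementwise reciprocal kernel: each program instance covers XBLOCK
# contiguous elements of the flattened input, and `xmask` disables
# out-of-range lanes in the final partial block.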
@triton.jit
def triton_poi_fused_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp1 / tmp0
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
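# Hypothetical smoke test for the compiled kernel (added sketch; assumes
# a CUDA device, and the input shape/strides are fixed by the assert in
# call()). Define-only, nothing runs at import time:
def _example_reciprocal_call():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out, = call([x.clone()])  # call() clears the argument list it is given
    torch.testing.assert_close(out, torch.reciprocal(x))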
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
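# With at most ceil(eps) coordinates allowed to change, the worst case
# perturbs the positions with the largest per-coordinate effect, hence
# the descending sort and the top-`eps` sum below.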
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
assert self.eps.shape[0] == self.eps.shape[1
], 'Argument [eps] must form an n-by-n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
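# Dynamic program over word positions: dp[i][j] holds the extremal value
# of the first i terms of A @ x using at most j substitutions; each step
# either keeps word i (Ax) or replaces it with its best candidate
# (Ax_rep_bound), spending one unit of budget.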
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed in NumPy 1.24
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(cls, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(cls, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
raise NotImplementedError()
class BoundActivation(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
self.relaxed = False
def _init_linear(self, x):
self.mask_pos = torch.gt(x.lower, 0).to(x.lower.dtype)
self.mask_neg = torch.lt(x.upper, 0).to(x.upper.dtype)
# the casts above are needed because `1 - mask` is unsupported on bool tensors
self.mask_both = 1 - self.mask_pos - self.mask_neg
self.lw = torch.zeros(x.lower.shape, device=self.device)
self.lb = self.lw.clone()
self.uw = self.lw.clone()
self.ub = self.lw.clone()
def _add_linear(self, mask, type, k, x0, y0):
if mask is None:
mask = 1
if type == 'lower':
w_out, b_out = self.lw, self.lb
else:
w_out, b_out = self.uw, self.ub
w_out += mask * k
b_out += mask * (-x0 * k + y0)
def bound_relax(self, x):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA, x):
if not self.relaxed:
self._init_linear(x)
self.bound_relax(x)
def _bound_oneside(last_A, sign=-1):
if last_A is None:
return None, 0
if self.batch_dim == 0:
if sign == -1:
_A = last_A.clamp(min=0) * self.lw.unsqueeze(0
) + last_A.clamp(max=0) * self.uw.unsqueeze(0)
_bias = last_A.clamp(min=0) * self.lb.unsqueeze(0
) + last_A.clamp(max=0) * self.ub.unsqueeze(0)
elif sign == 1:
_A = last_A.clamp(min=0) * self.uw.unsqueeze(0
) + last_A.clamp(max=0) * self.lw.unsqueeze(0)
_bias = last_A.clamp(min=0) * self.ub.unsqueeze(0
) + last_A.clamp(max=0) * self.lb.unsqueeze(0)
while _bias.ndim > 2:
_bias = torch.sum(_bias, dim=-1)
elif self.batch_dim == -1:
mask = torch.gt(last_A, 0.0).to(last_A.dtype)
if sign == -1:
_A = last_A * (mask * self.lw.unsqueeze(0).unsqueeze(1) +
(1 - mask) * self.uw.unsqueeze(0).unsqueeze(1))
_bias = last_A * (mask * self.lb.unsqueeze(0).unsqueeze
(1) + (1 - mask) * self.ub.unsqueeze(0).unsqueeze(1))
elif sign == 1:
_A = last_A * (mask * self.uw.unsqueeze(0).unsqueeze(1) +
(1 - mask) * self.lw.unsqueeze(0).unsqueeze(1))
_bias = last_A * (mask * self.ub.unsqueeze(0).unsqueeze
(1) + (1 - mask) * self.lb.unsqueeze(0).unsqueeze(1))
while _bias.ndim > 2:
_bias = torch.sum(_bias, dim=-1)
else:
raise NotImplementedError
return _A, _bias
lA, lbias = _bound_oneside(last_lA, sign=-1)
uA, ubias = _bound_oneside(last_uA, sign=+1)
return [(lA, uA)], lbias, ubias
def bound_forward(self, dim_in, x):
if not self.relaxed:
self._init_linear(x)
self.bound_relax(x)
if self.lw.ndim > 0:
if x.lw is not None:
lw = self.lw.unsqueeze(1).clamp(min=0
) * x.lw + self.lw.unsqueeze(1).clamp(max=0) * x.uw
uw = self.uw.unsqueeze(1).clamp(max=0
) * x.lw + self.uw.unsqueeze(1).clamp(min=0) * x.uw
else:
lw = uw = None
elif x.lw is not None:
lw = self.lw.unsqueeze(0).clamp(min=0) * x.lw + self.lw.unsqueeze(0
).clamp(max=0) * x.uw
uw = self.uw.unsqueeze(0).clamp(min=0) * x.lw + self.uw.unsqueeze(0
).clamp(max=0) * x.uw
else:
lw = uw = None
lb = self.lw.clamp(min=0) * x.lb + self.lw.clamp(max=0
) * x.ub + self.lb
ub = self.uw.clamp(max=0) * x.lb + self.uw.clamp(min=0
) * x.ub + self.ub
return LinearBound(lw, lb, uw, ub)
def infer_batch_dim(self, batch_size, *x):
return x[0]
class BoundReciprocalNew(BoundActivation):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def bound_relax(self, x):
m = (x.lower + x.upper) / 2
kl = -1 / m.pow(2)
self._add_linear(mask=None, type='lower', k=kl, x0=m, y0=1.0 / m)
ku = -1.0 / (x.lower * x.upper)
self._add_linear(mask=None, type='upper', k=ku, x0=x.lower, y0=1.0 /
x.lower)
def interval_propagate(self, *v):
h_L, h_U = v[0]
return torch.reciprocal(h_U.float()), torch.reciprocal(h_L.float())
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Mahoumaru/auto_LiRPA | BoundReciprocal | false | 13,227 | [
"BSD-3-Clause"
]
| 0 | b03a6c36eb1b921726778359d6d2b94e0cd7e480 | https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480 |
BoundCos | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/hn/chncdvnujauxq6f6q7jnanla4d6y3auixelm26y42jq3nuckgdxy.py
# Topologically Sorted Source Nodes: [cos], Original ATen: [aten.cos]
# Source node to ATen node mapping:
# cos => cos
# Graph fragment:
# %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_cos_0 = async_compile.triton('triton_poi_fused_cos_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cos_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cos_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.cos(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cos], Original ATen: [aten.cos]
stream0 = get_raw_stream(0)
triton_poi_fused_cos_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
from torch.nn import MSELoss
import json
import logging
logger = logging.getLogger(__name__)
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
assert self.eps.shape[0] == self.eps.shape[1
], 'Argument [eps] must form an n-by-n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed in NumPy 1.24
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(cls, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(cls, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
raise NotImplementedError()
class BoundCos(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def forward(self, x):
return torch.cos(x)
def infer_batch_dim(self, batch_size, *x):
return x[0]
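# Added remark: BoundCos inherits Bound.interval_propagate, which applies
# forward() to both interval endpoints; cos is not monotonic, so that
# default is only sound on intervals where cos happens to be monotone.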
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4,
'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion
=MSELoss()), 'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
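# Elementwise cosine kernel using the same masked 1D-grid layout as the
# reciprocal kernel above: one lane per element of the flattened input,
# with `xmask` guarding the final partial block.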
@triton.jit
def triton_poi_fused_cos_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.cos(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cos_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
            assert self.eps.shape[0] == self.eps.shape[1
                ], 'Argument [eps] must form an n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
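    # dp[i][j] above is the best (per `cmp`) bound over the first i words
    # using at most j substitutions: keep word i (dp[i-1][j] + Ax[:, i-1])
    # or replace it with its worst-case synonym
    # (dp[i-1][j-1] + Ax_rep_bound[:, i-1]).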
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
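    # _build_substitution caches, per example, the synonym candidates that
    # exist in the model vocabulary; the original token is kept as the first
    # candidate so init()/concretize() can recognize replaceable positions.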
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
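# Illustrative usage of Interval (values are hypothetical, not from the source):
#   itv = Interval(lb, ub, ptb=PerturbationLpNorm(eps=0.1))
#   Interval.get_perturbation(itv)   # -> (inf, 0.1)
#   Interval.is_perturbed((lb, ub))  # plain tuples are treated as perturbed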
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
    def infer_batch_dim(self, batch_size, *x):
        raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
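    # broadcast_backward undoes forward broadcasting of x: leading axes that
    # A gained are summed away (skipping the batch axis), and axes where x
    # had size 1 are summed with keepdim=True so A's trailing shape matches
    # x's shape again.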
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
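    # broadcast_forward first prepends a batch axis when one is missing,
    # then inserts singleton axes right after the (batch, dim_in) axes until
    # ranks match shape_res, and finally repeats to the target sizes; the
    # weight tensors lw/uw skip repetition along the dim_in axis.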
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
            raise NotImplementedError()
class BoundCosNew(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def infer_batch_dim(self, batch_size, *x):
return x[0]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Mahoumaru/auto_LiRPA | BoundCos | false | 13,228 | [
"BSD-3-Clause"
]
| 0 | b03a6c36eb1b921726778359d6d2b94e0cd7e480 | https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480 |
BoundSub | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/2x/c2xnwzkmtsschsc4kip66bb4tudxm35hzl4lx5aabvhr6szhgwdb.py
# Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub]
# Source node to ATen node mapping:
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
from torch.nn import MSELoss
import json
import logging
logger = logging.getLogger(__name__)
# Tolerance used in Bound.get_bias below; the value is assumed to match auto_LiRPA's utils.
epsilon = 1e-12
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
            assert self.eps.shape[0] == self.eps.shape[1
                ], 'Argument [eps] must form an n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
    def infer_batch_dim(self, batch_size, *x):
        raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
            raise NotImplementedError()
class BoundMul(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def forward(self, x, y):
self.x_shape = x.shape
self.y_shape = y.shape
return x * y
@staticmethod
def get_bound_mul(x_l, x_u, y_l, y_u):
alpha_l = y_l
beta_l = x_l
gamma_l = -alpha_l * beta_l
alpha_u = y_u
beta_u = x_l
gamma_u = -alpha_u * beta_u
return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u
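    # These are the McCormick planes for z = x * y on a box:
    #   z >= y_l * x + x_l * y - x_l * y_l   (lower)
    #   z <= y_u * x + x_l * y - x_l * y_u   (upper)
    # obtained from (x - x_l) * (y - y_l) >= 0 and (x - x_l) * (y - y_u) <= 0.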
@staticmethod
def get_bound_square(x_l, x_u):
x_m = F.relu(x_l) - F.relu(-x_u)
alpha_l = 2 * x_m
gamma_l = -x_m * x_m
alpha_u = x_l + x_u
gamma_u = -x_l * x_u
beta_l = torch.zeros_like(x_l)
beta_u = beta_l
return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u
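    # Relaxation of z = x^2: x_m = relu(x_l) - relu(-x_u) is the point of
    # [x_l, x_u] closest to zero, so the lower bound 2 * x_m * x - x_m^2 is
    # the tangent there, and the upper bound (x_l + x_u) * x - x_l * x_u is
    # the chord through both endpoints.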
@staticmethod
def _relax(x, y):
if x is y:
return BoundMul.get_bound_square(x.lower, x.upper)
x_l, x_u = x.lower, x.upper
y_l, y_u = y.lower, y.upper
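        # The add-then-subtract loops below leave the bound values unchanged
        # but broadcast x_l/x_u and y_l/y_u to a common shape before the
        # planes are built (a reading of this generated code, not documented
        # in the source).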
for k in [1, -1]:
x_l = x_l + k * y_l
x_u = x_u + k * y_u
for k in [1, -1]:
y_l = y_l + k * x_l
y_u = y_u + k * x_u
return BoundMul.get_bound_mul(x_l, x_u, y_l, y_u)
def bound_backward(self, last_lA, last_uA, x, y):
alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x,
y)
alpha_l, alpha_u = alpha_l.unsqueeze(0), alpha_u.unsqueeze(0)
beta_l, beta_u = beta_l.unsqueeze(0), beta_u.unsqueeze(0)
def _bound_oneside(last_A, alpha_pos, beta_pos, gamma_pos,
alpha_neg, beta_neg, gamma_neg):
if last_A is None:
return None, None, 0
last_A_pos, last_A_neg = last_A.clamp(min=0), last_A.clamp(max=0)
A_x = last_A_pos * alpha_pos + last_A_neg * alpha_neg
A_y = last_A_pos * beta_pos + last_A_neg * beta_neg
last_A = last_A.reshape(last_A.shape[0], last_A.shape[1], -1)
A_x = self.broadcast_backward(A_x, x)
A_y = self.broadcast_backward(A_y, y)
bias = self.get_bias(last_A_pos, gamma_pos) + self.get_bias(
last_A_neg, gamma_neg)
return A_x, A_y, bias
lA_x, lA_y, lbias = _bound_oneside(last_lA, alpha_l, beta_l,
gamma_l, alpha_u, beta_u, gamma_u)
uA_x, uA_y, ubias = _bound_oneside(last_uA, alpha_u, beta_u,
gamma_u, alpha_l, beta_l, gamma_l)
return [(lA_x, uA_x), (lA_y, uA_y)], lbias, ubias
@staticmethod
def bound_forward(dim_in, x, y):
x_lw, x_lb, x_uw, x_ub = x.lw, x.lb, x.uw, x.ub
y_lw, y_lb, y_uw, y_ub = y.lw, y.lb, y.uw, y.ub
alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x,
y)
if x_lw is None:
x_lw = 0
if y_lw is None:
y_lw = 0
if x_uw is None:
x_uw = 0
if y_uw is None:
y_uw = 0
lw = alpha_l.unsqueeze(1).clamp(min=0) * x_lw + alpha_l.unsqueeze(1
).clamp(max=0) * x_uw
lw = lw + beta_l.unsqueeze(1).clamp(min=0) * y_lw + beta_l.unsqueeze(1
).clamp(max=0) * y_uw
lb = alpha_l.clamp(min=0) * x_lb + alpha_l.clamp(max=0
) * x_ub + beta_l.clamp(min=0) * y_lb + beta_l.clamp(max=0
) * y_ub + gamma_l
uw = alpha_u.unsqueeze(1).clamp(max=0) * x_lw + alpha_u.unsqueeze(1
).clamp(min=0) * x_uw
uw = uw + beta_u.unsqueeze(1).clamp(max=0) * y_lw + beta_u.unsqueeze(1
).clamp(min=0) * y_uw
ub = alpha_u.clamp(max=0) * x_lb + alpha_u.clamp(min=0
) * x_ub + beta_u.clamp(max=0) * y_lb + beta_u.clamp(min=0
) * y_ub + gamma_u
return LinearBound(lw, lb, uw, ub)
@staticmethod
def interval_propagate(*v):
x, y = v[0], v[1]
if x is y:
h_L, h_U = v[0]
r0 = h_L * h_L
r1 = h_U * h_U
l = F.relu(h_L) - F.relu(-h_U)
return l * l, torch.max(r0, r1)
r0, r1, r2, r3 = x[0] * y[0], x[0] * y[1], x[1] * y[0], x[1] * y[1]
lower = torch.min(torch.min(r0, r1), torch.min(r2, r3))
upper = torch.max(torch.max(r0, r1), torch.max(r2, r3))
return lower, upper
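    # In interval_propagate above, when x is y (a square),
    # l = relu(h_L) - relu(-h_U) is the interval point closest to zero, so
    # l * l is zero whenever the interval crosses zero; otherwise the
    # min/max over the four corner products r0..r3 is exact for an
    # interval product.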
@staticmethod
def infer_batch_dim(batch_size, *x):
if x[0] == -1:
return x[1]
elif x[1] == -1:
return x[0]
else:
assert x[0] == x[1]
return x[0]
class BoundSub(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def forward(self, x, y):
self.x_shape = x.shape
self.y_shape = y.shape
return x - y
def bound_backward(self, last_lA, last_uA, x, y):
def _bound_oneside(last_A, w, sign=-1):
if last_A is None:
return None
return self.broadcast_backward(sign * last_A, w)
uA_x = _bound_oneside(last_uA, x, sign=1)
uA_y = _bound_oneside(last_uA, y, sign=-1)
lA_x = _bound_oneside(last_lA, x, sign=1)
lA_y = _bound_oneside(last_lA, y, sign=-1)
return [(lA_x, uA_x), (lA_y, uA_y)], 0, 0
def bound_forward(self, dim_in, x, y):
x_lw, x_lb, x_uw, x_ub = Bound.broadcast_forward(dim_in, x, self.
default_shape)
y_lw, y_lb, y_uw, y_ub = Bound.broadcast_forward(dim_in, y, self.
default_shape)
lw, lb = x_lw - y_uw, x_lb - y_ub
uw, ub = x_uw - y_lw, x_ub - y_lb
return LinearBound(lw, lb, uw, ub)
def interval_propagate(self, x, y):
return x[0] - y[1], x[1] - y[0]
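    # Interval arithmetic for subtraction: lower = x_l - y_u, upper = x_u - y_l.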
def infer_batch_dim(self, batch_size, *x):
return BoundMul.infer_batch_dim(batch_size, *x)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4,
'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion
=MSELoss()), 'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
import json
import logging
logger = logging.getLogger(__name__)
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Tolerance used in Bound.get_bias below; the value is assumed to match auto_LiRPA's utils.
epsilon = 1e-12
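# Elementwise subtraction kernel: one program instance per XBLOCK contiguous
# elements, with xmask guarding the ragged tail.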
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
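# Minimal usage sketch (assumes a CUDA device):
#   a = torch.rand(4, 4, 4, 4, device='cuda')
#   b = torch.rand(4, 4, 4, 4, device='cuda')
#   out, = call([a, b])   # out matches a - b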
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
            assert self.eps.shape[0] == self.eps.shape[1
                ], 'Argument [eps] must form an n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(cls, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
raise NotImplementedError()
class BoundMul(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def forward(self, x, y):
self.x_shape = x.shape
self.y_shape = y.shape
return x * y
@staticmethod
def get_bound_mul(x_l, x_u, y_l, y_u):
alpha_l = y_l
beta_l = x_l
gamma_l = -alpha_l * beta_l
alpha_u = y_u
beta_u = x_l
gamma_u = -alpha_u * beta_u
return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u
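# Note (added): the coefficients above are the standard McCormick envelopes
# for a bilinear term, stated here as a reading aid. For x in [x_l, x_u]
# and y in [y_l, y_u]:
#   x * y >= y_l * x + x_l * y - x_l * y_l   (alpha_l, beta_l, gamma_l)
#   x * y <= y_u * x + x_l * y - x_l * y_u   (alpha_u, beta_u, gamma_u)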
@staticmethod
def get_bound_square(x_l, x_u):
x_m = F.relu(x_l) - F.relu(-x_u)
alpha_l = 2 * x_m
gamma_l = -x_m * x_m
alpha_u = x_l + x_u
gamma_u = -x_l * x_u
beta_l = torch.zeros_like(x_l)
beta_u = beta_l
return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u
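# Note (added; an interpretation of the code above): x_m clamps 0 into
# [x_l, x_u], so for x in that interval
#   x^2 >= 2 * x_m * x - x_m^2            (tangent line at x_m)
#   x^2 <= (x_l + x_u) * x - x_l * x_u    (chord through both endpoints)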
@staticmethod
def _relax(x, y):
if x is y:
return BoundMul.get_bound_square(x.lower, x.upper)
x_l, x_u = x.lower, x.upper
y_l, y_u = y.lower, y.upper
for k in [1, -1]:
x_l = x_l + k * y_l
x_u = x_u + k * y_u
for k in [1, -1]:
y_l = y_l + k * x_l
y_u = y_u + k * x_u
return BoundMul.get_bound_mul(x_l, x_u, y_l, y_u)
def bound_backward(self, last_lA, last_uA, x, y):
alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x,
y)
alpha_l, alpha_u = alpha_l.unsqueeze(0), alpha_u.unsqueeze(0)
beta_l, beta_u = beta_l.unsqueeze(0), beta_u.unsqueeze(0)
def _bound_oneside(last_A, alpha_pos, beta_pos, gamma_pos,
alpha_neg, beta_neg, gamma_neg):
if last_A is None:
return None, None, 0
last_A_pos, last_A_neg = last_A.clamp(min=0), last_A.clamp(max=0)
A_x = last_A_pos * alpha_pos + last_A_neg * alpha_neg
A_y = last_A_pos * beta_pos + last_A_neg * beta_neg
last_A = last_A.reshape(last_A.shape[0], last_A.shape[1], -1)
A_x = self.broadcast_backward(A_x, x)
A_y = self.broadcast_backward(A_y, y)
bias = self.get_bias(last_A_pos, gamma_pos) + self.get_bias(
last_A_neg, gamma_neg)
return A_x, A_y, bias
lA_x, lA_y, lbias = _bound_oneside(last_lA, alpha_l, beta_l,
gamma_l, alpha_u, beta_u, gamma_u)
uA_x, uA_y, ubias = _bound_oneside(last_uA, alpha_u, beta_u,
gamma_u, alpha_l, beta_l, gamma_l)
return [(lA_x, uA_x), (lA_y, uA_y)], lbias, ubias
@staticmethod
def bound_forward(dim_in, x, y):
x_lw, x_lb, x_uw, x_ub = x.lw, x.lb, x.uw, x.ub
y_lw, y_lb, y_uw, y_ub = y.lw, y.lb, y.uw, y.ub
alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x,
y)
if x_lw is None:
x_lw = 0
if y_lw is None:
y_lw = 0
if x_uw is None:
x_uw = 0
if y_uw is None:
y_uw = 0
lw = alpha_l.unsqueeze(1).clamp(min=0) * x_lw + alpha_l.unsqueeze(1
).clamp(max=0) * x_uw
lw = lw + beta_l.unsqueeze(1).clamp(min=0) * y_lw + beta_l.unsqueeze(1
).clamp(max=0) * y_uw
lb = alpha_l.clamp(min=0) * x_lb + alpha_l.clamp(max=0
) * x_ub + beta_l.clamp(min=0) * y_lb + beta_l.clamp(max=0
) * y_ub + gamma_l
uw = alpha_u.unsqueeze(1).clamp(max=0) * x_lw + alpha_u.unsqueeze(1
).clamp(min=0) * x_uw
uw = uw + beta_u.unsqueeze(1).clamp(max=0) * y_lw + beta_u.unsqueeze(1
).clamp(min=0) * y_uw
ub = alpha_u.clamp(max=0) * x_lb + alpha_u.clamp(min=0
) * x_ub + beta_u.clamp(max=0) * y_lb + beta_u.clamp(min=0
) * y_ub + gamma_u
return LinearBound(lw, lb, uw, ub)
@staticmethod
def interval_propagate(*v):
x, y = v[0], v[1]
if x is y:
h_L, h_U = v[0]
r0 = h_L * h_L
r1 = h_U * h_U
l = F.relu(h_L) - F.relu(-h_U)
return l * l, torch.max(r0, r1)
r0, r1, r2, r3 = x[0] * y[0], x[0] * y[1], x[1] * y[0], x[1] * y[1]
lower = torch.min(torch.min(r0, r1), torch.min(r2, r3))
upper = torch.max(torch.max(r0, r1), torch.max(r2, r3))
return lower, upper
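# Worked example (added), assuming plain tuple inputs: for x = (1, 2) and
# y = (-3, 4) the four products are {-3, 4, -6, 8}, so the propagated
# interval is [-6, 8].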
@staticmethod
def infer_batch_dim(batch_size, *x):
if x[0] == -1:
return x[1]
elif x[1] == -1:
return x[0]
else:
assert x[0] == x[1]
return x[0]
class BoundSubNew(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def bound_backward(self, last_lA, last_uA, x, y):
def _bound_oneside(last_A, w, sign=-1):
if last_A is None:
return None
return self.broadcast_backward(sign * last_A, w)
uA_x = _bound_oneside(last_uA, x, sign=1)
uA_y = _bound_oneside(last_uA, y, sign=-1)
lA_x = _bound_oneside(last_lA, x, sign=1)
lA_y = _bound_oneside(last_lA, y, sign=-1)
return [(lA_x, uA_x), (lA_y, uA_y)], 0, 0
def bound_forward(self, dim_in, x, y):
x_lw, x_lb, x_uw, x_ub = Bound.broadcast_forward(dim_in, x, self.
default_shape)
y_lw, y_lb, y_uw, y_ub = Bound.broadcast_forward(dim_in, y, self.
default_shape)
lw, lb = x_lw - y_uw, x_lb - y_ub
uw, ub = x_uw - y_lw, x_ub - y_lb
return LinearBound(lw, lb, uw, ub)
def interval_propagate(self, x, y):
return x[0] - y[1], x[1] - y[0]
def infer_batch_dim(self, batch_size, *x):
return BoundMul.infer_batch_dim(batch_size, *x)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Mahoumaru/auto_LiRPA | BoundSub | false | 13,229 | [
"BSD-3-Clause"
]
| 0 | b03a6c36eb1b921726778359d6d2b94e0cd7e480 | https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480 |
BoundEqual | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/al/calhisv2tizhq3s3r7egazungn7zoqv2w3alcz76uykvsknhyvtu.py
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# eq => eq
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
triton_poi_fused_eq_0 = async_compile.triton('triton_poi_fused_eq_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
stream0 = get_raw_stream(0)
triton_poi_fused_eq_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
from torch.nn import MSELoss
import json
import logging
logger = logging.getLogger(__name__)  # added so _load_synonyms below can run; assumed equivalent to the upstream logger
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
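# Note (added; an interpretation of the code above, which appears to assume
# inputs in [0, 1]): `original` holds each coordinate's contribution A * x,
# A_diff is the gain from flipping one coordinate to its worst value, and
# summing the ceil(eps) largest sorted gains concretizes the bound under an
# L0 budget of eps changed coordinates, scaled by self.ratio.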
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
assert self.eps.shape[0] == self.eps.shape[1
], 'Argument [eps] must form an n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
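# Note (added; a reading of the code above, not upstream documentation):
# the loop over i and j is a knapsack-style dynamic program over word
# positions and the substitution budget. dp[i][j] bounds the first i
# positions using at most j substitutions: position i either keeps its
# original embedding (dp[i-1][j] + Ax[:, i-1]) or is replaced by its
# worst-case synonym (dp[i-1][j-1] + Ax_rep_bound[:, i-1]); cmp (max for
# sign == 1, min otherwise) picks the adversarial branch, and padded
# candidates were already masked out with +/- init above.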
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed in NumPy 1.24; use the builtin bool
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(cls, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
raise NotImplementedError()
class BoundMul(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def forward(self, x, y):
self.x_shape = x.shape
self.y_shape = y.shape
return x * y
@staticmethod
def get_bound_mul(x_l, x_u, y_l, y_u):
alpha_l = y_l
beta_l = x_l
gamma_l = -alpha_l * beta_l
alpha_u = y_u
beta_u = x_l
gamma_u = -alpha_u * beta_u
return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u
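# Note (added): the coefficients above are the standard McCormick envelopes
# for a bilinear term, stated here as a reading aid. For x in [x_l, x_u]
# and y in [y_l, y_u]:
#   x * y >= y_l * x + x_l * y - x_l * y_l   (alpha_l, beta_l, gamma_l)
#   x * y <= y_u * x + x_l * y - x_l * y_u   (alpha_u, beta_u, gamma_u)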
@staticmethod
def get_bound_square(x_l, x_u):
x_m = F.relu(x_l) - F.relu(-x_u)
alpha_l = 2 * x_m
gamma_l = -x_m * x_m
alpha_u = x_l + x_u
gamma_u = -x_l * x_u
beta_l = torch.zeros_like(x_l)
beta_u = beta_l
return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u
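# Note (added; an interpretation of the code above): x_m clamps 0 into
# [x_l, x_u], so for x in that interval
#   x^2 >= 2 * x_m * x - x_m^2            (tangent line at x_m)
#   x^2 <= (x_l + x_u) * x - x_l * x_u    (chord through both endpoints)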
@staticmethod
def _relax(x, y):
if x is y:
return BoundMul.get_bound_square(x.lower, x.upper)
x_l, x_u = x.lower, x.upper
y_l, y_u = y.lower, y.upper
for k in [1, -1]:
x_l = x_l + k * y_l
x_u = x_u + k * y_u
for k in [1, -1]:
y_l = y_l + k * x_l
y_u = y_u + k * x_u
return BoundMul.get_bound_mul(x_l, x_u, y_l, y_u)
def bound_backward(self, last_lA, last_uA, x, y):
alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x,
y)
alpha_l, alpha_u = alpha_l.unsqueeze(0), alpha_u.unsqueeze(0)
beta_l, beta_u = beta_l.unsqueeze(0), beta_u.unsqueeze(0)
def _bound_oneside(last_A, alpha_pos, beta_pos, gamma_pos,
alpha_neg, beta_neg, gamma_neg):
if last_A is None:
return None, None, 0
last_A_pos, last_A_neg = last_A.clamp(min=0), last_A.clamp(max=0)
A_x = last_A_pos * alpha_pos + last_A_neg * alpha_neg
A_y = last_A_pos * beta_pos + last_A_neg * beta_neg
last_A = last_A.reshape(last_A.shape[0], last_A.shape[1], -1)
A_x = self.broadcast_backward(A_x, x)
A_y = self.broadcast_backward(A_y, y)
bias = self.get_bias(last_A_pos, gamma_pos) + self.get_bias(
last_A_neg, gamma_neg)
return A_x, A_y, bias
lA_x, lA_y, lbias = _bound_oneside(last_lA, alpha_l, beta_l,
gamma_l, alpha_u, beta_u, gamma_u)
uA_x, uA_y, ubias = _bound_oneside(last_uA, alpha_u, beta_u,
gamma_u, alpha_l, beta_l, gamma_l)
return [(lA_x, uA_x), (lA_y, uA_y)], lbias, ubias
@staticmethod
def bound_forward(dim_in, x, y):
x_lw, x_lb, x_uw, x_ub = x.lw, x.lb, x.uw, x.ub
y_lw, y_lb, y_uw, y_ub = y.lw, y.lb, y.uw, y.ub
alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x,
y)
if x_lw is None:
x_lw = 0
if y_lw is None:
y_lw = 0
if x_uw is None:
x_uw = 0
if y_uw is None:
y_uw = 0
lw = alpha_l.unsqueeze(1).clamp(min=0) * x_lw + alpha_l.unsqueeze(1
).clamp(max=0) * x_uw
lw = lw + beta_l.unsqueeze(1).clamp(min=0) * y_lw + beta_l.unsqueeze(1
).clamp(max=0) * y_uw
lb = alpha_l.clamp(min=0) * x_lb + alpha_l.clamp(max=0
) * x_ub + beta_l.clamp(min=0) * y_lb + beta_l.clamp(max=0
) * y_ub + gamma_l
uw = alpha_u.unsqueeze(1).clamp(max=0) * x_lw + alpha_u.unsqueeze(1
).clamp(min=0) * x_uw
uw = uw + beta_u.unsqueeze(1).clamp(max=0) * y_lw + beta_u.unsqueeze(1
).clamp(min=0) * y_uw
ub = alpha_u.clamp(max=0) * x_lb + alpha_u.clamp(min=0
) * x_ub + beta_u.clamp(max=0) * y_lb + beta_u.clamp(min=0
) * y_ub + gamma_u
return LinearBound(lw, lb, uw, ub)
@staticmethod
def interval_propagate(*v):
x, y = v[0], v[1]
if x is y:
h_L, h_U = v[0]
r0 = h_L * h_L
r1 = h_U * h_U
l = F.relu(h_L) - F.relu(-h_U)
return l * l, torch.max(r0, r1)
r0, r1, r2, r3 = x[0] * y[0], x[0] * y[1], x[1] * y[0], x[1] * y[1]
lower = torch.min(torch.min(r0, r1), torch.min(r2, r3))
upper = torch.max(torch.max(r0, r1), torch.max(r2, r3))
return lower, upper
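# Worked example (added), assuming plain tuple inputs: for x = (1, 2) and
# y = (-3, 4) the four products are {-3, 4, -6, 8}, so the propagated
# interval is [-6, 8].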
@staticmethod
def infer_batch_dim(batch_size, *x):
if x[0] == -1:
return x[1]
elif x[1] == -1:
return x[0]
else:
assert x[0] == x[1]
return x[0]
class BoundEqual(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def forward(self, x, y):
return x == y
def infer_batch_dim(self, batch_size, *x):
return BoundMul.infer_batch_dim(batch_size, *x)
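# Note (added): forward above is the eager counterpart of the fused Triton
# kernel earlier in this entry; on two float tensors of shape (4, 4, 4, 4)
# both produce the boolean tensor torch.eq(x, y).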
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4,
'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion
=MSELoss()), 'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from numbers import Number
import json
import logging
logger = logging.getLogger(__name__)  # added so _load_synonyms below can run; assumed equivalent to the upstream logger
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_eq_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_eq_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
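# Example usage (added; a minimal sketch assuming a CUDA device is
# available):
#   a = torch.rand(4, 4, 4, 4, device='cuda')
#   b = torch.rand(4, 4, 4, 4, device='cuda')
#   (eq,) = call([a, b])  # boolean tensor, elementwise a == b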
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
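# Note (added; an interpretation of the code above, which appears to assume
# inputs in [0, 1]): `original` holds each coordinate's contribution A * x,
# A_diff is the gain from flipping one coordinate to its worst value, and
# summing the ceil(eps) largest sorted gains concretizes the bound under an
# L0 budget of eps changed coordinates, scaled by self.ratio.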
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
if not isinstance(eps, Number):
if not isinstance(eps, torch.Tensor):
self.eps = torch.tensor(eps)
else:
self.eps = eps
if len(self.eps.shape) == 1:
self.eps = torch.diag(self.eps)
assert self.eps.shape[0] == self.eps.shape[1
], 'Argument [eps] must form an n by n square matrix.'
self.norm = 2
else:
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
if isinstance(self.eps, Number):
deviation = A.norm(self.dual_norm, -1) * self.eps
else:
deviation = A.matmul(self.eps.transpose(0, 1)).norm(
self.dual_norm, -1)
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
elif isinstance(self.eps, Number):
bound = x + sign * self.eps
else:
bound = x + sign * self.eps.transpose(0, 1).norm(self.
dual_norm, -1)
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
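# Note (added; a reading of the code above, not upstream documentation):
# the loop over i and j is a knapsack-style dynamic program over word
# positions and the substitution budget. dp[i][j] bounds the first i
# positions using at most j substitutions: position i either keeps its
# original embedding (dp[i-1][j] + Ax[:, i-1]) or is replaced by its
# worst-case synonym (dp[i-1][j-1] + Ax_rep_bound[:, i-1]); cmp (max for
# sign == 1, min otherwise) picks the adversarial branch, and padded
# candidates were already masked out with +/- init above.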
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed in NumPy 1.24; use the builtin bool
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(cls, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
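        # Bias term of a linear relaxation: contracts the coefficient map A
        # with `bias`, handling both dense tensors and Patches-mode maps.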
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
                A_shape = [
                    A.shape[0],
                    np.prod(A.shape[1:self.batch_dim + 1]).astype(np.int32),
                    batch_size,
                    np.prod(A.shape[self.batch_dim + 2:]).astype(np.int32)]
                A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(
                    batch_size, output_dim, -1)
                bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
                    batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
                bias = F.unfold(bias, kernel_size=A.patches.size(-1),
                    stride=A.stride, padding=A.padding
                    ).transpose(-2, -1).unsqueeze(-2)
                patches = A.patches.view(
                    A.patches.size(0), A.patches.size(1), A.patches.size(-4),
                    A.patches.size(-1) * A.patches.size(-2) * A.patches.size(-3))
                prod = bias * patches
                bias_new = prod.sum(-1).transpose(-2, -1)
                side = int(math.sqrt(bias_new.size(-1)))
                bias_new = bias_new.view(
                    batch_size, bias_new.size(-2), side, side)
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
                return patches_reshape.view(
                    patches_reshape.size(0), patches_reshape.size(1),
                    int(math.sqrt(patches_reshape.size(2))), -1
                    ).transpose(0, 1)
return bias_new
        else:
            raise NotImplementedError(
                'get_bias() does not know how to handle {}'.format(type(A)))
class BoundMul(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def forward(self, x, y):
self.x_shape = x.shape
self.y_shape = y.shape
return x * y
@staticmethod
def get_bound_mul(x_l, x_u, y_l, y_u):
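        # McCormick planes for z = x * y on [x_l, x_u] x [y_l, y_u]:
        #   z >= y_l * x + x_l * y - x_l * y_l, since (x - x_l)(y - y_l) >= 0
        #   z <= y_u * x + x_l * y - x_l * y_u, since (x - x_l)(y_u - y) >= 0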
alpha_l = y_l
beta_l = x_l
gamma_l = -alpha_l * beta_l
alpha_u = y_u
beta_u = x_l
gamma_u = -alpha_u * beta_u
return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u
@staticmethod
def get_bound_square(x_l, x_u):
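        # Relaxation of z = x * x: the tangent at x_m = clamp(0, x_l, x_u)
        # is a lower bound; the chord through (x_l, x_l^2) and (x_u, x_u^2)
        # is an upper bound because (x - x_l)(x - x_u) <= 0 on the interval.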
x_m = F.relu(x_l) - F.relu(-x_u)
alpha_l = 2 * x_m
gamma_l = -x_m * x_m
alpha_u = x_l + x_u
gamma_u = -x_l * x_u
beta_l = torch.zeros_like(x_l)
beta_u = beta_l
return alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u
@staticmethod
def _relax(x, y):
if x is y:
return BoundMul.get_bound_square(x.lower, x.upper)
x_l, x_u = x.lower, x.upper
y_l, y_u = y.lower, y.upper
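        # Adding and then subtracting the other operand leaves all values
        # unchanged but broadcasts both pairs of bounds to a common shape.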
for k in [1, -1]:
x_l = x_l + k * y_l
x_u = x_u + k * y_u
for k in [1, -1]:
y_l = y_l + k * x_l
y_u = y_u + k * x_u
return BoundMul.get_bound_mul(x_l, x_u, y_l, y_u)
def bound_backward(self, last_lA, last_uA, x, y):
alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x,
y)
alpha_l, alpha_u = alpha_l.unsqueeze(0), alpha_u.unsqueeze(0)
beta_l, beta_u = beta_l.unsqueeze(0), beta_u.unsqueeze(0)
def _bound_oneside(last_A, alpha_pos, beta_pos, gamma_pos,
alpha_neg, beta_neg, gamma_neg):
if last_A is None:
return None, None, 0
last_A_pos, last_A_neg = last_A.clamp(min=0), last_A.clamp(max=0)
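            # Positive entries of last_A pick up the same-side plane,
            # negative entries the opposite-side plane.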
A_x = last_A_pos * alpha_pos + last_A_neg * alpha_neg
A_y = last_A_pos * beta_pos + last_A_neg * beta_neg
last_A = last_A.reshape(last_A.shape[0], last_A.shape[1], -1)
A_x = self.broadcast_backward(A_x, x)
A_y = self.broadcast_backward(A_y, y)
bias = self.get_bias(last_A_pos, gamma_pos) + self.get_bias(
last_A_neg, gamma_neg)
return A_x, A_y, bias
lA_x, lA_y, lbias = _bound_oneside(last_lA, alpha_l, beta_l,
gamma_l, alpha_u, beta_u, gamma_u)
uA_x, uA_y, ubias = _bound_oneside(last_uA, alpha_u, beta_u,
gamma_u, alpha_l, beta_l, gamma_l)
return [(lA_x, uA_x), (lA_y, uA_y)], lbias, ubias
@staticmethod
def bound_forward(dim_in, x, y):
x_lw, x_lb, x_uw, x_ub = x.lw, x.lb, x.uw, x.ub
y_lw, y_lb, y_uw, y_ub = y.lw, y.lb, y.uw, y.ub
alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u = BoundMul._relax(x,
y)
if x_lw is None:
x_lw = 0
if y_lw is None:
y_lw = 0
if x_uw is None:
x_uw = 0
if y_uw is None:
y_uw = 0
        lw = (alpha_l.unsqueeze(1).clamp(min=0) * x_lw
              + alpha_l.unsqueeze(1).clamp(max=0) * x_uw)
        lw = (lw + beta_l.unsqueeze(1).clamp(min=0) * y_lw
              + beta_l.unsqueeze(1).clamp(max=0) * y_uw)
        lb = (alpha_l.clamp(min=0) * x_lb + alpha_l.clamp(max=0) * x_ub
              + beta_l.clamp(min=0) * y_lb + beta_l.clamp(max=0) * y_ub
              + gamma_l)
        uw = (alpha_u.unsqueeze(1).clamp(max=0) * x_lw
              + alpha_u.unsqueeze(1).clamp(min=0) * x_uw)
        uw = (uw + beta_u.unsqueeze(1).clamp(max=0) * y_lw
              + beta_u.unsqueeze(1).clamp(min=0) * y_uw)
        ub = (alpha_u.clamp(max=0) * x_lb + alpha_u.clamp(min=0) * x_ub
              + beta_u.clamp(max=0) * y_lb + beta_u.clamp(min=0) * y_ub
              + gamma_u)
return LinearBound(lw, lb, uw, ub)
@staticmethod
def interval_propagate(*v):
x, y = v[0], v[1]
if x is y:
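            # x * x: l = clamp(0, h_L, h_U) is the minimizer of the square,
            # so the bounds are [l * l, max(h_L^2, h_U^2)].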
h_L, h_U = v[0]
r0 = h_L * h_L
r1 = h_U * h_U
l = F.relu(h_L) - F.relu(-h_U)
return l * l, torch.max(r0, r1)
r0, r1, r2, r3 = x[0] * y[0], x[0] * y[1], x[1] * y[0], x[1] * y[1]
lower = torch.min(torch.min(r0, r1), torch.min(r2, r3))
upper = torch.max(torch.max(r0, r1), torch.max(r2, r3))
return lower, upper
@staticmethod
def infer_batch_dim(batch_size, *x):
if x[0] == -1:
return x[1]
elif x[1] == -1:
return x[0]
else:
assert x[0] == x[1]
return x[0]
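def _bound_mul_sketch():
    # A numeric sanity sketch of BoundMul with illustrative intervals, assuming
    # torch is in scope: the McCormick planes bracket x * y on the box, the
    # square rule brackets x * x, and interval_propagate takes the four-corner
    # min/max.
    x_l, x_u, y_l, y_u = -1.0, 2.0, 0.5, 3.0
    a_l, b_l, g_l, a_u, b_u, g_u = BoundMul.get_bound_mul(x_l, x_u, y_l, y_u)
    for x in (x_l, 0.0, x_u):
        for y in (y_l, 1.0, y_u):
            assert a_l * x + b_l * y + g_l <= x * y <= a_u * x + b_u * y + g_u
    a_l, b_l, g_l, a_u, b_u, g_u = BoundMul.get_bound_square(
        torch.tensor(-1.0), torch.tensor(2.0))
    for x in (-1.0, 0.0, 2.0):
        assert a_l * x + g_l <= x * x <= a_u * x + g_u
    lower, upper = BoundMul.interval_propagate(
        (torch.tensor(-1.0), torch.tensor(2.0)),
        (torch.tensor(-3.0), torch.tensor(1.0)))
    assert lower.item() == -6.0 and upper.item() == 3.0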
class BoundEqualNew(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def infer_batch_dim(self, batch_size, *x):
return BoundMul.infer_batch_dim(batch_size, *x)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Mahoumaru/auto_LiRPA | BoundEqual | false | 13,230 | [
"BSD-3-Clause"
]
| 0 | b03a6c36eb1b921726778359d6d2b94e0cd7e480 | https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480 |
MMFB | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/34/c34lnuw3wok4euydis4jz2cumdv5zl53hr2km2mr6sokjcomm2j6.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused__weight_norm_interface_0 = async_compile.triton('triton_poi_fused__weight_norm_interface_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (6*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (6*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (6*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (6*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4 + (6*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (5 + (6*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tl.store(out_ptr0 + (x0), tmp17, xmask)
''', device_str='cuda')
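# A minimal eager-mode sketch of the reduction above (illustrative; v is
# assumed to have the (18, 6, 1, 1) layout asserted for primals_3 in call()):
# the kernel unrolls the per-output-channel L2 norm over six elements.
def _weight_norm_ref(v):
    return v.pow(2).sum(dim=(1, 2, 3), keepdim=True).sqrt()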
# kernel path: runs/run_shard_9/inductor_cache/4h/c4hyivh4rlcg5ap7pj7xbbqscs2u2jjb4g5w4jaidlc7wpyz7lbi.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
triton_poi_fused__weight_norm_interface_1 = async_compile.triton('triton_poi_fused__weight_norm_interface_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 6)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
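# Eager sketch of the kernel above (illustrative): the standard weight-norm
# reparameterization w = v * (g / ||v||), applied per output channel using the
# norms produced by triton_poi_fused__weight_norm_interface_0.
def _weight_norm_apply_ref(v, g, norm):
    return v * (g / norm)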
# kernel path: runs/run_shard_9/inductor_cache/ij/cijtnciymslu5jmlqs4skpm4lp2nibv5hh3th6b5i2szhhqwwhmh.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %mul, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 3), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 18
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
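# Eager sketch of the kernel above (illustrative): the grouped convolution
# itself is dispatched separately, so this pointwise kernel only folds the
# per-channel bias into the convolution output in place.
def _conv_bias_ref(conv_out, bias):
    return conv_out + bias.view(1, -1, 1, 1)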
# kernel path: runs/run_shard_9/inductor_cache/mz/cmzalccs4mduopsp2yd4zky3wwaeepvbubqparxslix3sntpfkub.py
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_1 => div_1, mul_1, pow_3, pow_4, sum_2
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_6, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1, 2, 3], True), kwargs = {})
# %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_5, %pow_4), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %div_1), kwargs = {})
triton_per_fused__weight_norm_interface_3 = async_compile.triton('triton_per_fused__weight_norm_interface_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 18
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (9*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (9*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
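# Eager sketch of the fused kernel above (illustrative); the _4 and _6
# variants below follow the same pattern with different reduction extents.
# One persistent reduction computes the per-filter L2 norm and rescales the
# direction tensor in a single pass, instead of splitting the two steps as
# kernels _0 and _1 do.
def _weight_norm_fused_ref(v, g):
    norm = v.pow(2).sum(dim=(1, 2, 3), keepdim=True).sqrt()
    return norm, v * (g / norm)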
# kernel path: runs/run_shard_9/inductor_cache/dy/cdylggyn2ws3sfvdukozllsfp2dvd7jch6eitsmkngzuzggssgxt.py
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_2 => div_2, mul_2, pow_5, pow_6, sum_3
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_9, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1, 2, 3], True), kwargs = {})
# %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_8, %pow_6), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, %div_2), kwargs = {})
triton_per_fused__weight_norm_interface_4 = async_compile.triton('triton_per_fused__weight_norm_interface_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 9
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (18*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (18*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4r/c4rv2lnvemalwfmwemwkkjuqab3eovephhf3dkwsqaqrzbzurn6e.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 18
x0 = xindex % 4096
x2 = (xindex // 73728)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (36864*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 18, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tl.load(in_ptr2 + (x0 + (4096*((-9) + x1)) + (36864*x2)), tmp15, other=0.0)
tmp19 = tl.load(in_ptr3 + ((-9) + x1), tmp15, eviction_policy='evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tmp20 > tmp8
tmp22 = tmp20 * tmp10
tmp23 = tl.where(tmp21, tmp20, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tl.store(out_ptr0 + (x3), tmp26, None)
''', device_str='cuda')
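# Eager sketch of the fused kernel above (illustrative): each 9-channel conv
# output gets its bias added and LeakyReLU (negative_slope=0.2) applied, and
# the two branches are concatenated into one 18-channel tensor in a single pass.
def _cat_leaky_relu_ref(a, bias_a, b, bias_b):
    lrelu = torch.nn.functional.leaky_relu
    left = lrelu(a + bias_a.view(1, -1, 1, 1), negative_slope=0.2)
    right = lrelu(b + bias_b.view(1, -1, 1, 1), negative_slope=0.2)
    return torch.cat([left, right], dim=1)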
# kernel path: runs/run_shard_9/inductor_cache/nt/cntzp5nakwvul7tcparxahaerbizrgojsic6at2feukyiwjvutbv.py
# Topologically Sorted Source Nodes: [_weight_norm_18], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm_18 => div_18, mul_24, pow_37, pow_38, sum_19
# Graph fragment:
# %pow_37 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_57, 2), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_37, [1, 2, 3], True), kwargs = {})
# %pow_38 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_19, 0.5), kwargs = {})
# %div_18 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_56, %pow_38), kwargs = {})
# %mul_24 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_57, %div_18), kwargs = {})
triton_per_fused__weight_norm_interface_6 = async_compile.triton('triton_per_fused__weight_norm_interface_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 18
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (18*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (18*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/yr/cyrzws2lnjde2as7gzejnwjcj4lymm7bgicdqzp4zy6p5j7qtkpv.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out => convolution_18
# out_1 => add
# Graph fragment:
# %convolution_18 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_4, %mul_24, %primals_58, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_18, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_7 = async_compile.triton('triton_poi_fused_add_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 18
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
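# Eager sketch of the kernel above (illustrative): bias fold plus the block's
# residual connection, i.e. out = conv_out + bias + identity (primals_1).
def _residual_add_ref(conv_out, bias, identity):
    return conv_out + bias.view(1, -1, 1, 1) + identity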
# kernel path: runs/run_shard_9/inductor_cache/5c/c5cr6hikvbeku2cgirxdojhbmam6elnnw462rhvsvtf6lygfd5l3.py
# Topologically Sorted Source Nodes: [out_10, out_11, out_12], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out_10 => convolution_113
# out_11 => add_5
# out_12 => add_6
# Graph fragment:
# %convolution_113 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_29, %mul_149, %primals_343, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_113, %add_4), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_8 = async_compile.triton('triton_poi_fused_add_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 294912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 18
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
tmp5 = tl.load(in_ptr2 + (x3), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
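# Eager sketch of the kernel above (illustrative): same pattern as the
# previous add kernel, but with two skip tensors folded in - the running
# block output and the original network input.
def _double_residual_add_ref(conv_out, bias, skip, identity):
    return conv_out + bias.view(1, -1, 1, 1) + skip + identity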
# kernel path: runs/run_shard_9/inductor_cache/re/crenqthrjosmipn372bljwzk5utlupuft2zj53j6fgv7ecgvqz5k.py
# Topologically Sorted Source Nodes: [x_107, c2_18], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# c2_18 => gt_35, mul_148, where_35
# x_107 => convolution_112
# Graph fragment:
# %convolution_112 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_111, %mul_147, %primals_340, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_35 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_112, 0), kwargs = {})
# %mul_148 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_112, 0.2), kwargs = {})
# %where_35 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_35, %convolution_112, %mul_148), kwargs = {})
# %gt_36 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_35, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 9
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
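# Eager sketch of the kernel above (illustrative): only the boolean mask
# needed by LeakyReLU's backward pass is materialized; for a positive slope,
# leaky_relu(x) > 0 is equivalent to x > 0 with x = conv_out + bias.
def _leaky_relu_mask_ref(conv_out, bias):
    return (conv_out + bias.view(1, -1, 1, 1)) > 0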
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185, primals_186, primals_187, primals_188, primals_189, primals_190, primals_191, primals_192, primals_193, primals_194, primals_195, primals_196, primals_197, primals_198, primals_199, primals_200, primals_201, primals_202, primals_203, primals_204, primals_205, primals_206, primals_207, primals_208, primals_209, primals_210, primals_211, primals_212, primals_213, primals_214, primals_215, primals_216, primals_217, primals_218, primals_219, primals_220, primals_221, primals_222, primals_223, primals_224, primals_225, primals_226, primals_227, primals_228, primals_229, primals_230, primals_231, primals_232, primals_233, primals_234, primals_235, primals_236, primals_237, primals_238, primals_239, primals_240, primals_241, primals_242, primals_243, primals_244, primals_245, primals_246, primals_247, primals_248, primals_249, primals_250, primals_251, primals_252, primals_253, primals_254, primals_255, primals_256, primals_257, primals_258, primals_259, primals_260, primals_261, primals_262, primals_263, primals_264, primals_265, primals_266, primals_267, primals_268, primals_269, primals_270, primals_271, primals_272, primals_273, primals_274, primals_275, primals_276, primals_277, primals_278, primals_279, primals_280, primals_281, 
primals_282, primals_283, primals_284, primals_285, primals_286, primals_287, primals_288, primals_289, primals_290, primals_291, primals_292, primals_293, primals_294, primals_295, primals_296, primals_297, primals_298, primals_299, primals_300, primals_301, primals_302, primals_303, primals_304, primals_305, primals_306, primals_307, primals_308, primals_309, primals_310, primals_311, primals_312, primals_313, primals_314, primals_315, primals_316, primals_317, primals_318, primals_319, primals_320, primals_321, primals_322, primals_323, primals_324, primals_325, primals_326, primals_327, primals_328, primals_329, primals_330, primals_331, primals_332, primals_333, primals_334, primals_335, primals_336, primals_337, primals_338, primals_339, primals_340, primals_341, primals_342, primals_343 = args
args.clear()
assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1))
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_4, (18, ), (1, ))
assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (18, ), (1, ))
assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_10, (9, ), (1, ))
assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_13, (18, ), (1, ))
assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_16, (18, ), (1, ))
assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_19, (9, ), (1, ))
assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_22, (18, ), (1, ))
assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_25, (18, ), (1, ))
assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_28, (9, ), (1, ))
assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_31, (18, ), (1, ))
assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_34, (18, ), (1, ))
assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_37, (9, ), (1, ))
assert_size_stride(primals_38, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_40, (18, ), (1, ))
assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_43, (18, ), (1, ))
assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_46, (9, ), (1, ))
assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_49, (18, ), (1, ))
assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_52, (18, ), (1, ))
assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_55, (9, ), (1, ))
assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_58, (18, ), (1, ))
assert_size_stride(primals_59, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_60, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_61, (18, ), (1, ))
assert_size_stride(primals_62, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_63, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_64, (18, ), (1, ))
assert_size_stride(primals_65, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_66, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_67, (9, ), (1, ))
assert_size_stride(primals_68, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_69, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_70, (18, ), (1, ))
assert_size_stride(primals_71, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_72, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_73, (18, ), (1, ))
assert_size_stride(primals_74, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_75, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_76, (9, ), (1, ))
assert_size_stride(primals_77, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_78, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_79, (18, ), (1, ))
assert_size_stride(primals_80, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_81, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_82, (18, ), (1, ))
assert_size_stride(primals_83, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_84, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_85, (9, ), (1, ))
assert_size_stride(primals_86, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_87, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_88, (18, ), (1, ))
assert_size_stride(primals_89, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_90, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_91, (18, ), (1, ))
assert_size_stride(primals_92, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_93, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_94, (9, ), (1, ))
assert_size_stride(primals_95, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_96, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_97, (18, ), (1, ))
assert_size_stride(primals_98, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_99, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_100, (18, ), (1, ))
assert_size_stride(primals_101, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_102, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_103, (9, ), (1, ))
assert_size_stride(primals_104, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_105, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_106, (18, ), (1, ))
assert_size_stride(primals_107, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_108, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_109, (18, ), (1, ))
assert_size_stride(primals_110, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_111, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_112, (9, ), (1, ))
assert_size_stride(primals_113, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_114, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_115, (18, ), (1, ))
assert_size_stride(primals_116, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_117, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_118, (18, ), (1, ))
assert_size_stride(primals_119, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_120, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_121, (18, ), (1, ))
assert_size_stride(primals_122, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_123, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_124, (9, ), (1, ))
assert_size_stride(primals_125, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_126, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_127, (18, ), (1, ))
assert_size_stride(primals_128, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_129, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_130, (18, ), (1, ))
assert_size_stride(primals_131, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_132, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_133, (9, ), (1, ))
assert_size_stride(primals_134, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_135, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_136, (18, ), (1, ))
assert_size_stride(primals_137, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_138, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_139, (18, ), (1, ))
assert_size_stride(primals_140, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_141, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_142, (9, ), (1, ))
assert_size_stride(primals_143, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_144, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_145, (18, ), (1, ))
assert_size_stride(primals_146, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_147, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_148, (18, ), (1, ))
assert_size_stride(primals_149, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_150, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_151, (9, ), (1, ))
assert_size_stride(primals_152, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_153, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_154, (18, ), (1, ))
assert_size_stride(primals_155, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_156, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_157, (18, ), (1, ))
assert_size_stride(primals_158, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_159, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_160, (9, ), (1, ))
assert_size_stride(primals_161, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_162, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_163, (18, ), (1, ))
assert_size_stride(primals_164, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_165, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_166, (18, ), (1, ))
assert_size_stride(primals_167, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_168, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_169, (9, ), (1, ))
assert_size_stride(primals_170, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_171, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_172, (18, ), (1, ))
assert_size_stride(primals_173, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_174, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_175, (18, ), (1, ))
assert_size_stride(primals_176, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_177, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_178, (18, ), (1, ))
assert_size_stride(primals_179, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_180, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_181, (9, ), (1, ))
assert_size_stride(primals_182, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_183, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_184, (18, ), (1, ))
assert_size_stride(primals_185, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_186, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_187, (18, ), (1, ))
assert_size_stride(primals_188, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_189, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_190, (9, ), (1, ))
assert_size_stride(primals_191, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_192, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_193, (18, ), (1, ))
assert_size_stride(primals_194, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_195, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_196, (18, ), (1, ))
assert_size_stride(primals_197, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_198, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_199, (9, ), (1, ))
assert_size_stride(primals_200, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_201, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_202, (18, ), (1, ))
assert_size_stride(primals_203, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_204, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_205, (18, ), (1, ))
assert_size_stride(primals_206, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_207, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_208, (9, ), (1, ))
assert_size_stride(primals_209, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_210, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_211, (18, ), (1, ))
assert_size_stride(primals_212, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_213, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_214, (18, ), (1, ))
assert_size_stride(primals_215, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_216, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_217, (9, ), (1, ))
assert_size_stride(primals_218, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_219, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_220, (18, ), (1, ))
assert_size_stride(primals_221, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_222, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_223, (18, ), (1, ))
assert_size_stride(primals_224, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_225, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_226, (9, ), (1, ))
assert_size_stride(primals_227, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_228, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_229, (18, ), (1, ))
assert_size_stride(primals_230, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_231, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_232, (18, ), (1, ))
assert_size_stride(primals_233, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_234, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_235, (18, ), (1, ))
assert_size_stride(primals_236, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_237, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_238, (9, ), (1, ))
assert_size_stride(primals_239, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_240, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_241, (18, ), (1, ))
assert_size_stride(primals_242, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_243, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_244, (18, ), (1, ))
assert_size_stride(primals_245, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_246, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_247, (9, ), (1, ))
assert_size_stride(primals_248, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_249, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_250, (18, ), (1, ))
assert_size_stride(primals_251, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_252, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_253, (18, ), (1, ))
assert_size_stride(primals_254, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_255, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_256, (9, ), (1, ))
assert_size_stride(primals_257, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_258, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_259, (18, ), (1, ))
assert_size_stride(primals_260, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_261, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_262, (18, ), (1, ))
assert_size_stride(primals_263, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_264, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_265, (9, ), (1, ))
assert_size_stride(primals_266, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_267, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_268, (18, ), (1, ))
assert_size_stride(primals_269, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_270, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_271, (18, ), (1, ))
assert_size_stride(primals_272, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_273, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_274, (9, ), (1, ))
assert_size_stride(primals_275, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_276, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_277, (18, ), (1, ))
assert_size_stride(primals_278, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_279, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_280, (18, ), (1, ))
assert_size_stride(primals_281, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_282, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_283, (9, ), (1, ))
assert_size_stride(primals_284, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_285, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_286, (18, ), (1, ))
assert_size_stride(primals_287, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_288, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_289, (18, ), (1, ))
assert_size_stride(primals_290, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_291, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_292, (18, ), (1, ))
assert_size_stride(primals_293, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_294, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_295, (9, ), (1, ))
assert_size_stride(primals_296, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_297, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_298, (18, ), (1, ))
assert_size_stride(primals_299, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_300, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_301, (18, ), (1, ))
assert_size_stride(primals_302, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_303, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_304, (9, ), (1, ))
assert_size_stride(primals_305, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_306, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_307, (18, ), (1, ))
assert_size_stride(primals_308, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_309, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_310, (18, ), (1, ))
assert_size_stride(primals_311, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_312, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_313, (9, ), (1, ))
assert_size_stride(primals_314, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_315, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_316, (18, ), (1, ))
assert_size_stride(primals_317, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_318, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_319, (18, ), (1, ))
assert_size_stride(primals_320, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_321, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_322, (9, ), (1, ))
assert_size_stride(primals_323, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_324, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_325, (18, ), (1, ))
assert_size_stride(primals_326, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_327, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_328, (18, ), (1, ))
assert_size_stride(primals_329, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_330, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_331, (9, ), (1, ))
assert_size_stride(primals_332, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_333, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_334, (18, ), (1, ))
assert_size_stride(primals_335, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_336, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_337, (18, ), (1, ))
assert_size_stride(primals_338, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_339, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_340, (9, ), (1, ))
assert_size_stride(primals_341, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_342, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_343, (18, ), (1, ))
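# All asserts above are metadata-only guards (shape/stride checks; no data is
# read or copied). The parameters arrive as repeating weight-norm triples: a
# (C, 1, 1, 1) magnitude g, a direction tensor v ((18, 6, 1, 1) grouped 1x1,
# (18, 1, 3, 3) depthwise 3x3, or (9, 18, 1, 1) / (18, 18, 1, 1) projection
# weights), and a (C,) bias, one triple per convolution in the stacked blocks.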
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
stream0 = get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0.run(primals_3, buf0, 18, grid=grid(18), stream=stream0)
buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_3, primals_2, buf0, buf1, 108, grid=grid(108), stream=stream0)
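    # Weight-norm sketch (mirroring aten._weight_norm_interface; argument roles
    # inferred from the kernel signatures):
    #   norm = v.norm(2, dim=(1, 2, 3), keepdim=True)   # buf0, one value per filter
    #   w = g * v / norm                                # buf1, the weight used below
    # with v = primals_3 (direction) and g = primals_2 (magnitude).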
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf3, primals_4, 294912, grid=grid(294912), stream=stream0)
del primals_4
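    # Recurring launch pattern: extern_kernels.convolution runs the ATen
    # convolution with bias=None, then triton_poi_fused_convolution_2 adds the
    # bias in place over all 4*18*64*64 = 294912 elements. `buf3 = buf2; del buf2`
    # is inductor's buffer-reuse idiom: the tensor is renamed, not copied.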
buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf4 # reuse
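    # reinterpret_tensor is a zero-copy view: buf4 is allocated with
    # non-contiguous strides (1, 18, 18, 18), and buf5 reads the same storage
    # back as a contiguous (18, 1, 1, 1) tensor without moving any data.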
buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf5, primals_6, primals_5, buf6, 18, 9, grid=grid(18), stream=stream0)
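    # The triton_per_* weight-norm variants fuse reduction and rescale into one
    # launch: here grid(18) programs each reduce the 9 taps of a 3x3 depthwise
    # filter, emitting both the norm (buf5) and the normalized weight (buf6).
    # Kernel _4 below is the same scheme with 18 reduction elements per 1x1 filter.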
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf8, primals_7, 294912, grid=grid(294912), stream=stream0)
del primals_7
buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf9 # reuse
buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf10, primals_9, primals_8, buf11, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1))
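    # buf12 is the first 9-channel branch (1x1 conv, 18 -> 9). Note that no bias
    # kernel runs here: its bias (primals_10) appears to be deferred into the
    # concat kernel further down.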
buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_12, buf13, 18, grid=grid(18), stream=stream0)
buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_12, primals_11, buf13, buf14, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf16, primals_13, 294912, grid=grid(294912), stream=stream0)
del primals_13
buf17 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf17 # reuse
buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_4], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf18, primals_15, primals_14, buf19, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf21, primals_16, 294912, grid=grid(294912), stream=stream0)
del primals_16
buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf22 # reuse
buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_5], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf23, primals_18, primals_17, buf24, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf12, primals_10, buf25, primals_19, buf26, 294912, grid=grid(294912), stream=stream0)
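    # triton_poi_fused_cat_5 stitches the two 9-channel branches back into an
    # 18-channel tensor and, judging by the extra bias arguments (primals_10,
    # primals_19), folds both deferred bias adds into the same pass, saving two
    # intermediate writes.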
buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_6], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_21, buf27, 18, grid=grid(18), stream=stream0)
buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_6], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_21, primals_20, buf27, buf28, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf30 = buf29; del buf29 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf30, primals_22, 294912, grid=grid(294912), stream=stream0)
del primals_22
buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf31 # reuse
buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_7], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf32, primals_24, primals_23, buf33, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf35, primals_25, 294912, grid=grid(294912), stream=stream0)
del primals_25
buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf36 # reuse
buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_8], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf37, primals_27, primals_26, buf38, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_9], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_30, buf40, 18, grid=grid(18), stream=stream0)
buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_9], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_30, primals_29, buf40, buf41, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf26, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf43, primals_31, 294912, grid=grid(294912), stream=stream0)
del primals_31
buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf44 # reuse
buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_10], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf45, primals_33, primals_32, buf46, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf48 = buf47; del buf47 # reuse
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf48, primals_34, 294912, grid=grid(294912), stream=stream0)
del primals_34
buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf49 # reuse
buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_11], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf50, primals_36, primals_35, buf51, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf39, primals_28, buf52, primals_37, buf53, 294912, grid=grid(294912), stream=stream0)
buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_12], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_39, buf54, 18, grid=grid(18), stream=stream0)
buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_12], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_39, primals_38, buf54, buf55, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf57 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf57, primals_40, 294912, grid=grid(294912), stream=stream0)
del primals_40
buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf58 # reuse
buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_13], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf59, primals_42, primals_41, buf60, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf62 = buf61; del buf61 # reuse
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf62, primals_43, 294912, grid=grid(294912), stream=stream0)
del primals_43
buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf63 # reuse
buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_14], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf64, primals_45, primals_44, buf65, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_15], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_48, buf67, 18, grid=grid(18), stream=stream0)
buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_15], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_48, primals_47, buf67, buf68, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf70 = buf69; del buf69 # reuse
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf70, primals_49, 294912, grid=grid(294912), stream=stream0)
del primals_49
buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf71 # reuse
buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_16], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf72, primals_51, primals_50, buf73, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf75 = buf74; del buf74 # reuse
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf75, primals_52, 294912, grid=grid(294912), stream=stream0)
del primals_52
buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf76 # reuse
buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_17], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf77, primals_54, primals_53, buf78, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution]
buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf66, primals_46, buf79, primals_55, buf80, 294912, grid=grid(294912), stream=stream0)
buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf81 # reuse
buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_18], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_6.run(buf82, primals_57, primals_56, buf83, 18, 18, grid=grid(18), stream=stream0)
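    # Kernel _6 is the fused weight-norm reduction specialized to the
    # (18, 18, 1, 1) projection that closes the block: 18 programs, 18 reduction
    # elements each.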
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf85 = buf84; del buf84 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_7.run(buf85, primals_58, primals_1, 294912, grid=grid(294912), stream=stream0)
del primals_58
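    # Residual connection: per the [convolution, add] annotation, buf85 is
    # updated in place as out_1 = conv_out + bias (primals_58) + block input
    # (primals_1).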
buf86 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_19], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_60, buf86, 18, grid=grid(18), stream=stream0)
buf87 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_19], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_60, primals_59, buf86, buf87, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution]
buf88 = extern_kernels.convolution(buf85, buf87, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf88, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf89 = buf88; del buf88 # reuse
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf89, primals_61, 294912, grid=grid(294912), stream=stream0)
del primals_61
buf90 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf91 = reinterpret_tensor(buf90, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf90 # reuse
buf92 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_20], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf91, primals_63, primals_62, buf92, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
buf93 = extern_kernels.convolution(buf89, buf92, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf93, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf94 = buf93; del buf93 # reuse
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf94, primals_64, 294912, grid=grid(294912), stream=stream0)
del primals_64
buf95 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf96 = reinterpret_tensor(buf95, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf95 # reuse
buf97 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_21], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf96, primals_66, primals_65, buf97, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution]
buf98 = extern_kernels.convolution(buf94, buf97, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf98, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf99 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_22], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_69, buf99, 18, grid=grid(18), stream=stream0)
buf100 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_22], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_69, primals_68, buf99, buf100, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.convolution]
buf101 = extern_kernels.convolution(buf85, buf100, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf101, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf102 = buf101; del buf101 # reuse
# Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf102, primals_70, 294912, grid=grid(294912), stream=stream0)
del primals_70
buf103 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf104 = reinterpret_tensor(buf103, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf103 # reuse
buf105 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_23], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf104, primals_72, primals_71, buf105, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution]
buf106 = extern_kernels.convolution(buf102, buf105, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf106, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf107 = buf106; del buf106 # reuse
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf107, primals_73, 294912, grid=grid(294912), stream=stream0)
del primals_73
buf108 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf109 = reinterpret_tensor(buf108, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf108 # reuse
buf110 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_24], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf109, primals_75, primals_74, buf110, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.convolution]
buf111 = extern_kernels.convolution(buf107, buf110, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf111, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf112 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_5], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf98, primals_67, buf111, primals_76, buf112, 294912, grid=grid(294912), stream=stream0)
buf113 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_25], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_78, buf113, 18, grid=grid(18), stream=stream0)
buf114 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_25], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_78, primals_77, buf113, buf114, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution]
buf115 = extern_kernels.convolution(buf112, buf114, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf115, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf116 = buf115; del buf115 # reuse
# Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf116, primals_79, 294912, grid=grid(294912), stream=stream0)
del primals_79
buf117 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf118 = reinterpret_tensor(buf117, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf117 # reuse
buf119 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_26], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf118, primals_81, primals_80, buf119, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.convolution]
buf120 = extern_kernels.convolution(buf116, buf119, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf120, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf121 = buf120; del buf120 # reuse
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf121, primals_82, 294912, grid=grid(294912), stream=stream0)
del primals_82
buf122 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf123 = reinterpret_tensor(buf122, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf122 # reuse
buf124 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_27], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf123, primals_84, primals_83, buf124, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution]
buf125 = extern_kernels.convolution(buf121, buf124, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf125, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf126 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_28], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_87, buf126, 18, grid=grid(18), stream=stream0)
buf127 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_28], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_87, primals_86, buf126, buf127, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution]
buf128 = extern_kernels.convolution(buf112, buf127, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf128, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf129 = buf128; del buf128 # reuse
# Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf129, primals_88, 294912, grid=grid(294912), stream=stream0)
del primals_88
buf130 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf131 = reinterpret_tensor(buf130, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf130 # reuse
buf132 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_29], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf131, primals_90, primals_89, buf132, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
buf133 = extern_kernels.convolution(buf129, buf132, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf133, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf134 = buf133; del buf133 # reuse
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf134, primals_91, 294912, grid=grid(294912), stream=stream0)
del primals_91
buf135 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf136 = reinterpret_tensor(buf135, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf135 # reuse
buf137 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_30], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf136, primals_93, primals_92, buf137, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.convolution]
buf138 = extern_kernels.convolution(buf134, buf137, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf138, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf139 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_7], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf125, primals_85, buf138, primals_94, buf139, 294912, grid=grid(294912), stream=stream0)
buf140 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_31], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_96, buf140, 18, grid=grid(18), stream=stream0)
buf141 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_31], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_96, primals_95, buf140, buf141, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution]
buf142 = extern_kernels.convolution(buf139, buf141, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf142, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf143 = buf142; del buf142 # reuse
# Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf143, primals_97, 294912, grid=grid(294912), stream=stream0)
del primals_97
buf144 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf145 = reinterpret_tensor(buf144, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf144 # reuse
buf146 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_32], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf145, primals_99, primals_98, buf146, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution]
buf147 = extern_kernels.convolution(buf143, buf146, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf147, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf148 = buf147; del buf147 # reuse
# Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf148, primals_100, 294912, grid=grid(294912), stream=stream0)
del primals_100
buf149 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf150 = reinterpret_tensor(buf149, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf149 # reuse
buf151 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_33], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf150, primals_102, primals_101, buf151, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_32], Original ATen: [aten.convolution]
buf152 = extern_kernels.convolution(buf148, buf151, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf152, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf153 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_34], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_105, buf153, 18, grid=grid(18), stream=stream0)
buf154 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_34], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_105, primals_104, buf153, buf154, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_33], Original ATen: [aten.convolution]
buf155 = extern_kernels.convolution(buf139, buf154, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf155, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf156 = buf155; del buf155 # reuse
# Topologically Sorted Source Nodes: [x_33], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf156, primals_106, 294912, grid=grid(294912), stream=stream0)
del primals_106
buf157 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf158 = reinterpret_tensor(buf157, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf157 # reuse
buf159 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_35], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf158, primals_108, primals_107, buf159, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_34], Original ATen: [aten.convolution]
buf160 = extern_kernels.convolution(buf156, buf159, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf160, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf161 = buf160; del buf160 # reuse
# Topologically Sorted Source Nodes: [x_34], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf161, primals_109, 294912, grid=grid(294912), stream=stream0)
del primals_109
buf162 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf163 = reinterpret_tensor(buf162, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf162 # reuse
buf164 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_36], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf163, primals_111, primals_110, buf164, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_35], Original ATen: [aten.convolution]
buf165 = extern_kernels.convolution(buf161, buf164, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf166 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_9], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf152, primals_103, buf165, primals_112, buf166, 294912, grid=grid(294912), stream=stream0)
buf167 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf168 = reinterpret_tensor(buf167, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf167 # reuse
buf169 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_37], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_6.run(buf168, primals_114, primals_113, buf169, 18, 18, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf170 = extern_kernels.convolution(buf166, buf169, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf170, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf171 = buf170; del buf170 # reuse
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_7.run(buf171, primals_115, buf85, 294912, grid=grid(294912), stream=stream0)
del primals_115
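    # Second residual: out_3 accumulates onto buf85 (the previous block's
    # output), so the residual path chains from block to block rather than from
    # the original input.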
buf172 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_38], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_117, buf172, 18, grid=grid(18), stream=stream0)
buf173 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_38], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_117, primals_116, buf172, buf173, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.convolution]
buf174 = extern_kernels.convolution(buf171, buf173, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf174, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf175 = buf174; del buf174 # reuse
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf175, primals_118, 294912, grid=grid(294912), stream=stream0)
del primals_118
buf176 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf177 = reinterpret_tensor(buf176, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf176 # reuse
buf178 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_39], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf177, primals_120, primals_119, buf178, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.convolution]
buf179 = extern_kernels.convolution(buf175, buf178, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf179, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf180 = buf179; del buf179 # reuse
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf180, primals_121, 294912, grid=grid(294912), stream=stream0)
del primals_121
buf181 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf182 = reinterpret_tensor(buf181, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf181 # reuse
buf183 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_40], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf182, primals_123, primals_122, buf183, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_38], Original ATen: [aten.convolution]
buf184 = extern_kernels.convolution(buf180, buf183, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf184, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf185 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_41], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_126, buf185, 18, grid=grid(18), stream=stream0)
buf186 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_41], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_126, primals_125, buf185, buf186, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_39], Original ATen: [aten.convolution]
buf187 = extern_kernels.convolution(buf171, buf186, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf187, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf188 = buf187; del buf187 # reuse
# Topologically Sorted Source Nodes: [x_39], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf188, primals_127, 294912, grid=grid(294912), stream=stream0)
del primals_127
buf189 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf190 = reinterpret_tensor(buf189, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf189 # reuse
buf191 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_42], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf190, primals_129, primals_128, buf191, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_40], Original ATen: [aten.convolution]
buf192 = extern_kernels.convolution(buf188, buf191, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf192, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf193 = buf192; del buf192 # reuse
# Topologically Sorted Source Nodes: [x_40], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf193, primals_130, 294912, grid=grid(294912), stream=stream0)
del primals_130
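    # Dilated variant: this depthwise 3x3 conv uses dilation=(2, 2) with
    # padding=(2, 2); for a 3x3 kernel at stride 1, padding == dilation preserves
    # the 64x64 spatial size while widening the receptive field of each tap.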
buf194 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf195 = reinterpret_tensor(buf194, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf194 # reuse
buf196 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_43], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf195, primals_132, primals_131, buf196, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_41], Original ATen: [aten.convolution]
buf197 = extern_kernels.convolution(buf193, buf196, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf197, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf198 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_10], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf184, primals_124, buf197, primals_133, buf198, 294912, grid=grid(294912), stream=stream0)
buf199 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_44], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_135, buf199, 18, grid=grid(18), stream=stream0)
buf200 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_44], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_135, primals_134, buf199, buf200, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_42], Original ATen: [aten.convolution]
buf201 = extern_kernels.convolution(buf198, buf200, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf201, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf202 = buf201; del buf201 # reuse
# Topologically Sorted Source Nodes: [x_42], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf202, primals_136, 294912, grid=grid(294912), stream=stream0)
del primals_136
buf203 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf204 = reinterpret_tensor(buf203, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf203 # reuse
buf205 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_45], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf204, primals_138, primals_137, buf205, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.convolution]
buf206 = extern_kernels.convolution(buf202, buf205, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf206, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf207 = buf206; del buf206 # reuse
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf207, primals_139, 294912, grid=grid(294912), stream=stream0)
del primals_139
buf208 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf209 = reinterpret_tensor(buf208, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf208 # reuse
buf210 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_46], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf209, primals_141, primals_140, buf210, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_44], Original ATen: [aten.convolution]
buf211 = extern_kernels.convolution(buf207, buf210, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf211, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf212 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_47], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_144, buf212, 18, grid=grid(18), stream=stream0)
buf213 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_47], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_144, primals_143, buf212, buf213, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_45], Original ATen: [aten.convolution]
buf214 = extern_kernels.convolution(buf198, buf213, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf214, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf215 = buf214; del buf214 # reuse
# Topologically Sorted Source Nodes: [x_45], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf215, primals_145, 294912, grid=grid(294912), stream=stream0)
del primals_145
buf216 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf217 = reinterpret_tensor(buf216, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf216 # reuse
buf218 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_48], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf217, primals_147, primals_146, buf218, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_46], Original ATen: [aten.convolution]
buf219 = extern_kernels.convolution(buf215, buf218, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf219, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf220 = buf219; del buf219 # reuse
# Topologically Sorted Source Nodes: [x_46], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf220, primals_148, 294912, grid=grid(294912), stream=stream0)
del primals_148
buf221 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf222 = reinterpret_tensor(buf221, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf221 # reuse
buf223 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_49], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf222, primals_150, primals_149, buf223, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_47], Original ATen: [aten.convolution]
buf224 = extern_kernels.convolution(buf220, buf223, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf224, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf225 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_12], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf211, primals_142, buf224, primals_151, buf225, 294912, grid=grid(294912), stream=stream0)
buf226 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_50], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_153, buf226, 18, grid=grid(18), stream=stream0)
buf227 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_50], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_153, primals_152, buf226, buf227, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_48], Original ATen: [aten.convolution]
buf228 = extern_kernels.convolution(buf225, buf227, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf228, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf229 = buf228; del buf228 # reuse
# Topologically Sorted Source Nodes: [x_48], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf229, primals_154, 294912, grid=grid(294912), stream=stream0)
del primals_154
buf230 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf231 = reinterpret_tensor(buf230, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf230 # reuse
buf232 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_51], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf231, primals_156, primals_155, buf232, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_49], Original ATen: [aten.convolution]
buf233 = extern_kernels.convolution(buf229, buf232, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf233, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf234 = buf233; del buf233 # reuse
# Topologically Sorted Source Nodes: [x_49], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf234, primals_157, 294912, grid=grid(294912), stream=stream0)
del primals_157
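    # x_49 is the depthwise stage: groups=18 matches the channel count and the
    # weight buf232 is (18, 1, 3, 3), so each channel is filtered by its own
    # 3x3 kernel; padding=(1, 1) with dilation=(1, 1) preserves the 64x64
    # spatial size.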
buf235 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf236 = reinterpret_tensor(buf235, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf235 # reuse
buf237 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_52], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf236, primals_159, primals_158, buf237, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_50], Original ATen: [aten.convolution]
buf238 = extern_kernels.convolution(buf234, buf237, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf238, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf239 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_53], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_162, buf239, 18, grid=grid(18), stream=stream0)
buf240 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_53], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_162, primals_161, buf239, buf240, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_51], Original ATen: [aten.convolution]
buf241 = extern_kernels.convolution(buf225, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf241, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf242 = buf241; del buf241 # reuse
# Topologically Sorted Source Nodes: [x_51], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf242, primals_163, 294912, grid=grid(294912), stream=stream0)
del primals_163
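    # x_51..x_53 form the second branch from the same input (buf225) as
    # x_48..x_50; the branches differ only in the depthwise stage's dilation
    # ((2, 2) below vs (1, 1) above), giving two receptive-field sizes whose
    # 9-channel outputs are concatenated in cat_14 further down.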
buf243 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf244 = reinterpret_tensor(buf243, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf243 # reuse
buf245 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_54], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf244, primals_165, primals_164, buf245, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_52], Original ATen: [aten.convolution]
buf246 = extern_kernels.convolution(buf242, buf245, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf246, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf247 = buf246; del buf246 # reuse
# Topologically Sorted Source Nodes: [x_52], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf247, primals_166, 294912, grid=grid(294912), stream=stream0)
del primals_166
buf248 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf249 = reinterpret_tensor(buf248, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf248 # reuse
buf250 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_55], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf249, primals_168, primals_167, buf250, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_53], Original ATen: [aten.convolution]
buf251 = extern_kernels.convolution(buf247, buf250, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf251, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf252 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_14], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf238, primals_160, buf251, primals_169, buf252, 294912, grid=grid(294912), stream=stream0)
buf253 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf254 = reinterpret_tensor(buf253, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf253 # reuse
buf255 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_56], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_6.run(buf254, primals_171, primals_170, buf255, 18, 18, grid=grid(18), stream=stream0)
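    # Unlike the grouped branch convs, this projection weight buf255 is a full
    # (18, 18, 1, 1) kernel with groups=1, mixing information across all
    # channels before the residual add that follows.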
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf256 = extern_kernels.convolution(buf252, buf255, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf256, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf257 = buf256; del buf256 # reuse
# Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_7.run(buf257, primals_172, buf171, 294912, grid=grid(294912), stream=stream0)
del primals_172
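    # out_5 = out_4 + skip: triton_poi_fused_add_convolution_7 folds the conv
    # bias (primals_172) and the skip tensor buf171 (presumably the preceding
    # residual block's output) into a single in-place pointwise kernel on
    # buf257.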
buf258 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_57], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_174, buf258, 18, grid=grid(18), stream=stream0)
buf259 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_57], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_174, primals_173, buf258, buf259, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_54], Original ATen: [aten.convolution]
buf260 = extern_kernels.convolution(buf257, buf259, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf260, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf261 = buf260; del buf260 # reuse
# Topologically Sorted Source Nodes: [x_54], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf261, primals_175, 294912, grid=grid(294912), stream=stream0)
del primals_175
buf262 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf263 = reinterpret_tensor(buf262, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf262 # reuse
buf264 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_58], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf263, primals_177, primals_176, buf264, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_55], Original ATen: [aten.convolution]
buf265 = extern_kernels.convolution(buf261, buf264, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf265, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf266 = buf265; del buf265 # reuse
# Topologically Sorted Source Nodes: [x_55], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf266, primals_178, 294912, grid=grid(294912), stream=stream0)
del primals_178
buf267 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf268 = reinterpret_tensor(buf267, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf267 # reuse
buf269 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_59], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf268, primals_180, primals_179, buf269, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_56], Original ATen: [aten.convolution]
buf270 = extern_kernels.convolution(buf266, buf269, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf270, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf271 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_60], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_183, buf271, 18, grid=grid(18), stream=stream0)
buf272 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_60], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_183, primals_182, buf271, buf272, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_57], Original ATen: [aten.convolution]
buf273 = extern_kernels.convolution(buf257, buf272, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf273, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf274 = buf273; del buf273 # reuse
# Topologically Sorted Source Nodes: [x_57], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf274, primals_184, 294912, grid=grid(294912), stream=stream0)
del primals_184
buf275 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf276 = reinterpret_tensor(buf275, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf275 # reuse
buf277 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_61], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf276, primals_186, primals_185, buf277, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_58], Original ATen: [aten.convolution]
buf278 = extern_kernels.convolution(buf274, buf277, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf278, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf279 = buf278; del buf278 # reuse
# Topologically Sorted Source Nodes: [x_58], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf279, primals_187, 294912, grid=grid(294912), stream=stream0)
del primals_187
buf280 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf281 = reinterpret_tensor(buf280, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf280 # reuse
buf282 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_62], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf281, primals_189, primals_188, buf282, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_59], Original ATen: [aten.convolution]
buf283 = extern_kernels.convolution(buf279, buf282, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf283, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf284 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_15], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf270, primals_181, buf283, primals_190, buf284, 294912, grid=grid(294912), stream=stream0)
buf285 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_63], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_192, buf285, 18, grid=grid(18), stream=stream0)
buf286 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_63], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_192, primals_191, buf285, buf286, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_60], Original ATen: [aten.convolution]
buf287 = extern_kernels.convolution(buf284, buf286, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf287, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf288 = buf287; del buf287 # reuse
# Topologically Sorted Source Nodes: [x_60], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf288, primals_193, 294912, grid=grid(294912), stream=stream0)
del primals_193
buf289 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf290 = reinterpret_tensor(buf289, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf289 # reuse
buf291 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_64], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf290, primals_195, primals_194, buf291, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_61], Original ATen: [aten.convolution]
buf292 = extern_kernels.convolution(buf288, buf291, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf292, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf293 = buf292; del buf292 # reuse
# Topologically Sorted Source Nodes: [x_61], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf293, primals_196, 294912, grid=grid(294912), stream=stream0)
del primals_196
buf294 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf295 = reinterpret_tensor(buf294, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf294 # reuse
buf296 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_65], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf295, primals_198, primals_197, buf296, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_62], Original ATen: [aten.convolution]
buf297 = extern_kernels.convolution(buf293, buf296, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf297, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf298 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_66], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_201, buf298, 18, grid=grid(18), stream=stream0)
buf299 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_66], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_201, primals_200, buf298, buf299, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_63], Original ATen: [aten.convolution]
buf300 = extern_kernels.convolution(buf284, buf299, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf300, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf301 = buf300; del buf300 # reuse
# Topologically Sorted Source Nodes: [x_63], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf301, primals_202, 294912, grid=grid(294912), stream=stream0)
del primals_202
buf302 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf303 = reinterpret_tensor(buf302, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf302 # reuse
buf304 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_67], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf303, primals_204, primals_203, buf304, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_64], Original ATen: [aten.convolution]
buf305 = extern_kernels.convolution(buf301, buf304, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf305, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf306 = buf305; del buf305 # reuse
# Topologically Sorted Source Nodes: [x_64], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf306, primals_205, 294912, grid=grid(294912), stream=stream0)
del primals_205
buf307 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf308 = reinterpret_tensor(buf307, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf307 # reuse
buf309 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_68], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf308, primals_207, primals_206, buf309, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_65], Original ATen: [aten.convolution]
buf310 = extern_kernels.convolution(buf306, buf309, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf310, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf311 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_17], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf297, primals_199, buf310, primals_208, buf311, 294912, grid=grid(294912), stream=stream0)
buf312 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_69], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_210, buf312, 18, grid=grid(18), stream=stream0)
buf313 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_69], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_210, primals_209, buf312, buf313, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_66], Original ATen: [aten.convolution]
buf314 = extern_kernels.convolution(buf311, buf313, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf314, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf315 = buf314; del buf314 # reuse
# Topologically Sorted Source Nodes: [x_66], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf315, primals_211, 294912, grid=grid(294912), stream=stream0)
del primals_211
buf316 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf317 = reinterpret_tensor(buf316, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf316 # reuse
buf318 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_70], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf317, primals_213, primals_212, buf318, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_67], Original ATen: [aten.convolution]
buf319 = extern_kernels.convolution(buf315, buf318, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf319, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf320 = buf319; del buf319 # reuse
# Topologically Sorted Source Nodes: [x_67], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf320, primals_214, 294912, grid=grid(294912), stream=stream0)
del primals_214
buf321 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf322 = reinterpret_tensor(buf321, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf321 # reuse
buf323 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_71], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf322, primals_216, primals_215, buf323, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_68], Original ATen: [aten.convolution]
buf324 = extern_kernels.convolution(buf320, buf323, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf324, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf325 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_72], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_219, buf325, 18, grid=grid(18), stream=stream0)
buf326 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_72], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_219, primals_218, buf325, buf326, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_69], Original ATen: [aten.convolution]
buf327 = extern_kernels.convolution(buf311, buf326, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf327, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf328 = buf327; del buf327 # reuse
# Topologically Sorted Source Nodes: [x_69], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf328, primals_220, 294912, grid=grid(294912), stream=stream0)
del primals_220
buf329 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf330 = reinterpret_tensor(buf329, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf329 # reuse
buf331 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_73], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf330, primals_222, primals_221, buf331, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_70], Original ATen: [aten.convolution]
buf332 = extern_kernels.convolution(buf328, buf331, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf332, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf333 = buf332; del buf332 # reuse
# Topologically Sorted Source Nodes: [x_70], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf333, primals_223, 294912, grid=grid(294912), stream=stream0)
del primals_223
buf334 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf335 = reinterpret_tensor(buf334, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf334 # reuse
buf336 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_74], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf335, primals_225, primals_224, buf336, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_71], Original ATen: [aten.convolution]
buf337 = extern_kernels.convolution(buf333, buf336, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf337, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf338 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_19], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf324, primals_217, buf337, primals_226, buf338, 294912, grid=grid(294912), stream=stream0)
buf339 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf340 = reinterpret_tensor(buf339, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf339 # reuse
buf341 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_75], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_6.run(buf340, primals_228, primals_227, buf341, 18, 18, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution]
buf342 = extern_kernels.convolution(buf338, buf341, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf342, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf343 = buf342; del buf342 # reuse
# Topologically Sorted Source Nodes: [out_6, out_7], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_7.run(buf343, primals_229, buf257, 294912, grid=grid(294912), stream=stream0)
del primals_229
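    # The residual chaining continues: out_7 = out_6 + buf257, where buf257 is
    # out_5 from the previous group, so each group of dual-branch units
    # refines the running 18-channel feature map.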
buf344 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_76], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_231, buf344, 18, grid=grid(18), stream=stream0)
buf345 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_76], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_231, primals_230, buf344, buf345, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_72], Original ATen: [aten.convolution]
buf346 = extern_kernels.convolution(buf343, buf345, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf346, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf347 = buf346; del buf346 # reuse
# Topologically Sorted Source Nodes: [x_72], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf347, primals_232, 294912, grid=grid(294912), stream=stream0)
del primals_232
buf348 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf349 = reinterpret_tensor(buf348, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf348 # reuse
buf350 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_77], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf349, primals_234, primals_233, buf350, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_73], Original ATen: [aten.convolution]
buf351 = extern_kernels.convolution(buf347, buf350, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf351, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf352 = buf351; del buf351 # reuse
# Topologically Sorted Source Nodes: [x_73], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf352, primals_235, 294912, grid=grid(294912), stream=stream0)
del primals_235
buf353 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf354 = reinterpret_tensor(buf353, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf353 # reuse
buf355 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_78], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf354, primals_237, primals_236, buf355, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_74], Original ATen: [aten.convolution]
buf356 = extern_kernels.convolution(buf352, buf355, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf356, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf357 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_79], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_240, buf357, 18, grid=grid(18), stream=stream0)
buf358 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_79], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_240, primals_239, buf357, buf358, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_75], Original ATen: [aten.convolution]
buf359 = extern_kernels.convolution(buf343, buf358, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf359, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf360 = buf359; del buf359 # reuse
# Topologically Sorted Source Nodes: [x_75], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf360, primals_241, 294912, grid=grid(294912), stream=stream0)
del primals_241
buf361 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf362 = reinterpret_tensor(buf361, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf361 # reuse
buf363 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_80], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf362, primals_243, primals_242, buf363, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_76], Original ATen: [aten.convolution]
buf364 = extern_kernels.convolution(buf360, buf363, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf364, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf365 = buf364; del buf364 # reuse
# Topologically Sorted Source Nodes: [x_76], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf365, primals_244, 294912, grid=grid(294912), stream=stream0)
del primals_244
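    # This branch uses dilation=(3, 3); with stride 1 and a 3x3 kernel the
    # output size is H + 2*p - 2*d, so choosing padding equal to the dilation
    # (p = d) keeps the 64x64 resolution for every branch, whether d is 1, 2,
    # or 3.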
buf366 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf367 = reinterpret_tensor(buf366, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf366 # reuse
buf368 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_81], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf367, primals_246, primals_245, buf368, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_77], Original ATen: [aten.convolution]
buf369 = extern_kernels.convolution(buf365, buf368, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf369, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf370 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_20], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf356, primals_238, buf369, primals_247, buf370, 294912, grid=grid(294912), stream=stream0)
buf371 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_82], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_249, buf371, 18, grid=grid(18), stream=stream0)
buf372 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_82], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_249, primals_248, buf371, buf372, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_78], Original ATen: [aten.convolution]
buf373 = extern_kernels.convolution(buf370, buf372, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf373, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf374 = buf373; del buf373 # reuse
# Topologically Sorted Source Nodes: [x_78], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf374, primals_250, 294912, grid=grid(294912), stream=stream0)
del primals_250
buf375 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf376 = reinterpret_tensor(buf375, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf375 # reuse
buf377 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_83], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf376, primals_252, primals_251, buf377, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_79], Original ATen: [aten.convolution]
buf378 = extern_kernels.convolution(buf374, buf377, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf378, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf379 = buf378; del buf378 # reuse
# Topologically Sorted Source Nodes: [x_79], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf379, primals_253, 294912, grid=grid(294912), stream=stream0)
del primals_253
buf380 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf381 = reinterpret_tensor(buf380, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf380 # reuse
buf382 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_84], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf381, primals_255, primals_254, buf382, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_80], Original ATen: [aten.convolution]
buf383 = extern_kernels.convolution(buf379, buf382, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf383, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf384 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_85], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_258, buf384, 18, grid=grid(18), stream=stream0)
buf385 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_85], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_258, primals_257, buf384, buf385, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_81], Original ATen: [aten.convolution]
buf386 = extern_kernels.convolution(buf370, buf385, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf386, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf387 = buf386; del buf386 # reuse
# Topologically Sorted Source Nodes: [x_81], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf387, primals_259, 294912, grid=grid(294912), stream=stream0)
del primals_259
buf388 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf389 = reinterpret_tensor(buf388, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf388 # reuse
buf390 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_86], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf389, primals_261, primals_260, buf390, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_82], Original ATen: [aten.convolution]
buf391 = extern_kernels.convolution(buf387, buf390, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf391, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf392 = buf391; del buf391 # reuse
# Topologically Sorted Source Nodes: [x_82], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf392, primals_262, 294912, grid=grid(294912), stream=stream0)
del primals_262
buf393 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf394 = reinterpret_tensor(buf393, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf393 # reuse
buf395 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_87], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf394, primals_264, primals_263, buf395, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_83], Original ATen: [aten.convolution]
buf396 = extern_kernels.convolution(buf392, buf395, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf396, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf397 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_22], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf383, primals_256, buf396, primals_265, buf397, 294912, grid=grid(294912), stream=stream0)
buf398 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_88], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_267, buf398, 18, grid=grid(18), stream=stream0)
buf399 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_88], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_267, primals_266, buf398, buf399, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_84], Original ATen: [aten.convolution]
buf400 = extern_kernels.convolution(buf397, buf399, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf400, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf401 = buf400; del buf400 # reuse
# Topologically Sorted Source Nodes: [x_84], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf401, primals_268, 294912, grid=grid(294912), stream=stream0)
del primals_268
buf402 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf403 = reinterpret_tensor(buf402, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf402 # reuse
buf404 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_89], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf403, primals_270, primals_269, buf404, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_85], Original ATen: [aten.convolution]
buf405 = extern_kernels.convolution(buf401, buf404, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf405, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf406 = buf405; del buf405 # reuse
# Topologically Sorted Source Nodes: [x_85], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf406, primals_271, 294912, grid=grid(294912), stream=stream0)
del primals_271
buf407 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf408 = reinterpret_tensor(buf407, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf407 # reuse
buf409 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_90], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf408, primals_273, primals_272, buf409, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_86], Original ATen: [aten.convolution]
buf410 = extern_kernels.convolution(buf406, buf409, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf410, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf411 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_91], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_276, buf411, 18, grid=grid(18), stream=stream0)
buf412 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_91], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_276, primals_275, buf411, buf412, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_87], Original ATen: [aten.convolution]
buf413 = extern_kernels.convolution(buf397, buf412, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf413, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf414 = buf413; del buf413 # reuse
# Topologically Sorted Source Nodes: [x_87], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf414, primals_277, 294912, grid=grid(294912), stream=stream0)
del primals_277
buf415 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf416 = reinterpret_tensor(buf415, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf415 # reuse
buf417 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_92], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf416, primals_279, primals_278, buf417, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_88], Original ATen: [aten.convolution]
buf418 = extern_kernels.convolution(buf414, buf417, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf418, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf419 = buf418; del buf418 # reuse
# Topologically Sorted Source Nodes: [x_88], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf419, primals_280, 294912, grid=grid(294912), stream=stream0)
del primals_280
buf420 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf421 = reinterpret_tensor(buf420, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf420 # reuse
buf422 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_93], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf421, primals_282, primals_281, buf422, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_89], Original ATen: [aten.convolution]
buf423 = extern_kernels.convolution(buf419, buf422, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf423, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf424 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_24], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf410, primals_274, buf423, primals_283, buf424, 294912, grid=grid(294912), stream=stream0)
buf425 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf426 = reinterpret_tensor(buf425, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf425 # reuse
buf427 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_94], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_6.run(buf426, primals_285, primals_284, buf427, 18, 18, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution]
buf428 = extern_kernels.convolution(buf424, buf427, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf428, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf429 = buf428; del buf428 # reuse
# Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_7.run(buf429, primals_286, buf343, 294912, grid=grid(294912), stream=stream0)
del primals_286
buf430 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_95], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_288, buf430, 18, grid=grid(18), stream=stream0)
buf431 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_95], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_288, primals_287, buf430, buf431, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_90], Original ATen: [aten.convolution]
buf432 = extern_kernels.convolution(buf429, buf431, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf432, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf433 = buf432; del buf432 # reuse
# Topologically Sorted Source Nodes: [x_90], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf433, primals_289, 294912, grid=grid(294912), stream=stream0)
del primals_289
buf434 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf435 = reinterpret_tensor(buf434, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf434 # reuse
buf436 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_96], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf435, primals_291, primals_290, buf436, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_91], Original ATen: [aten.convolution]
buf437 = extern_kernels.convolution(buf433, buf436, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf437, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf438 = buf437; del buf437 # reuse
# Topologically Sorted Source Nodes: [x_91], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf438, primals_292, 294912, grid=grid(294912), stream=stream0)
del primals_292
buf439 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf440 = reinterpret_tensor(buf439, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf439 # reuse
buf441 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_97], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf440, primals_294, primals_293, buf441, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_92], Original ATen: [aten.convolution]
buf442 = extern_kernels.convolution(buf438, buf441, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf442, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf443 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_98], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_297, buf443, 18, grid=grid(18), stream=stream0)
buf444 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_98], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_297, primals_296, buf443, buf444, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_93], Original ATen: [aten.convolution]
buf445 = extern_kernels.convolution(buf429, buf444, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf445, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf446 = buf445; del buf445 # reuse
# Topologically Sorted Source Nodes: [x_93], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf446, primals_298, 294912, grid=grid(294912), stream=stream0)
del primals_298
buf447 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf448 = reinterpret_tensor(buf447, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf447 # reuse
buf449 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_99], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf448, primals_300, primals_299, buf449, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_94], Original ATen: [aten.convolution]
buf450 = extern_kernels.convolution(buf446, buf449, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf450, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf451 = buf450; del buf450 # reuse
# Topologically Sorted Source Nodes: [x_94], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf451, primals_301, 294912, grid=grid(294912), stream=stream0)
del primals_301
buf452 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf453 = reinterpret_tensor(buf452, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf452 # reuse
buf454 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_100], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf453, primals_303, primals_302, buf454, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_95], Original ATen: [aten.convolution]
buf455 = extern_kernels.convolution(buf451, buf454, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf455, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf456 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_25], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf442, primals_295, buf455, primals_304, buf456, 294912, grid=grid(294912), stream=stream0)
buf457 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_101], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_306, buf457, 18, grid=grid(18), stream=stream0)
buf458 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_101], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_306, primals_305, buf457, buf458, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_96], Original ATen: [aten.convolution]
buf459 = extern_kernels.convolution(buf456, buf458, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf459, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf460 = buf459; del buf459 # reuse
# Topologically Sorted Source Nodes: [x_96], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf460, primals_307, 294912, grid=grid(294912), stream=stream0)
del primals_307
buf461 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf462 = reinterpret_tensor(buf461, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf461 # reuse
buf463 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_102], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf462, primals_309, primals_308, buf463, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_97], Original ATen: [aten.convolution]
buf464 = extern_kernels.convolution(buf460, buf463, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf464, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf465 = buf464; del buf464 # reuse
# Topologically Sorted Source Nodes: [x_97], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf465, primals_310, 294912, grid=grid(294912), stream=stream0)
del primals_310
buf466 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf467 = reinterpret_tensor(buf466, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf466 # reuse
buf468 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_103], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf467, primals_312, primals_311, buf468, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_98], Original ATen: [aten.convolution]
buf469 = extern_kernels.convolution(buf465, buf468, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf469, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf470 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_104], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_315, buf470, 18, grid=grid(18), stream=stream0)
buf471 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_104], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_315, primals_314, buf470, buf471, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_99], Original ATen: [aten.convolution]
buf472 = extern_kernels.convolution(buf456, buf471, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf472, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf473 = buf472; del buf472 # reuse
# Topologically Sorted Source Nodes: [x_99], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf473, primals_316, 294912, grid=grid(294912), stream=stream0)
del primals_316
buf474 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf475 = reinterpret_tensor(buf474, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf474 # reuse
buf476 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_105], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf475, primals_318, primals_317, buf476, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_100], Original ATen: [aten.convolution]
buf477 = extern_kernels.convolution(buf473, buf476, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf477, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf478 = buf477; del buf477 # reuse
# Topologically Sorted Source Nodes: [x_100], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf478, primals_319, 294912, grid=grid(294912), stream=stream0)
del primals_319
buf479 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf480 = reinterpret_tensor(buf479, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf479 # reuse
buf481 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_106], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf480, primals_321, primals_320, buf481, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_101], Original ATen: [aten.convolution]
buf482 = extern_kernels.convolution(buf478, buf481, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf482, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf483 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_27], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf469, primals_313, buf482, primals_322, buf483, 294912, grid=grid(294912), stream=stream0)
buf484 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_107], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_324, buf484, 18, grid=grid(18), stream=stream0)
buf485 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_107], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_324, primals_323, buf484, buf485, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_102], Original ATen: [aten.convolution]
buf486 = extern_kernels.convolution(buf483, buf485, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf486, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf487 = buf486; del buf486 # reuse
# Topologically Sorted Source Nodes: [x_102], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf487, primals_325, 294912, grid=grid(294912), stream=stream0)
del primals_325
buf488 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf489 = reinterpret_tensor(buf488, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf488 # reuse
buf490 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_108], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf489, primals_327, primals_326, buf490, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_103], Original ATen: [aten.convolution]
buf491 = extern_kernels.convolution(buf487, buf490, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf491, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf492 = buf491; del buf491 # reuse
# Topologically Sorted Source Nodes: [x_103], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf492, primals_328, 294912, grid=grid(294912), stream=stream0)
del primals_328
buf493 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf494 = reinterpret_tensor(buf493, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf493 # reuse
buf495 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_109], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf494, primals_330, primals_329, buf495, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_104], Original ATen: [aten.convolution]
buf496 = extern_kernels.convolution(buf492, buf495, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf496, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf497 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_110], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_0.run(primals_333, buf497, 18, grid=grid(18), stream=stream0)
buf498 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_110], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_333, primals_332, buf497, buf498, 108, grid=grid(108), stream=stream0)
# Topologically Sorted Source Nodes: [x_105], Original ATen: [aten.convolution]
buf499 = extern_kernels.convolution(buf483, buf498, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf499, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf500 = buf499; del buf499 # reuse
# Topologically Sorted Source Nodes: [x_105], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf500, primals_334, 294912, grid=grid(294912), stream=stream0)
del primals_334
buf501 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf502 = reinterpret_tensor(buf501, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf501 # reuse
buf503 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_111], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_3.run(buf502, primals_336, primals_335, buf503, 18, 9, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [x_106], Original ATen: [aten.convolution]
buf504 = extern_kernels.convolution(buf500, buf503, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf504, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf505 = buf504; del buf504 # reuse
# Topologically Sorted Source Nodes: [x_106], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf505, primals_337, 294912, grid=grid(294912), stream=stream0)
del primals_337
buf506 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf507 = reinterpret_tensor(buf506, (9, 1, 1, 1), (1, 1, 1, 1), 0); del buf506 # reuse
buf508 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_112], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_4.run(buf507, primals_339, primals_338, buf508, 9, 18, grid=grid(9), stream=stream0)
# Topologically Sorted Source Nodes: [x_107], Original ATen: [aten.convolution]
buf509 = extern_kernels.convolution(buf505, buf508, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf509, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf510 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_29], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf496, primals_331, buf509, primals_340, buf510, 294912, grid=grid(294912), stream=stream0)
buf511 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf512 = reinterpret_tensor(buf511, (18, 1, 1, 1), (1, 1, 1, 1), 0); del buf511 # reuse
buf513 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_113], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_6.run(buf512, primals_342, primals_341, buf513, 18, 18, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.convolution]
buf514 = extern_kernels.convolution(buf510, buf513, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf514, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf515 = buf514; del buf514 # reuse
# Topologically Sorted Source Nodes: [out_10, out_11, out_12], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_8.run(buf515, primals_343, buf429, primals_1, 294912, grid=grid(294912), stream=stream0)
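    # Note (added, hedged): triton_poi_fused_add_convolution_8 appears to fuse
    # the conv bias (primals_343) with both skip connections, computing
    # roughly buf515 = conv_out + bias + buf429 + primals_1, which matches the
    # `out = out + x` residual adds in the MIRB blocks defined below.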
del primals_343
buf516 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_107, c2_18], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf509, primals_340, buf516, 147456, grid=grid(147456), stream=stream0)
del buf509
del primals_340
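    # Note (added): this and the ~35 identical launches that follow appear to
    # precompute boolean LeakyReLU masks ((x + bias) > 0) for reuse in the
    # autograd backward pass, freeing each intermediate activation buffer
    # (`del buf...`) once its mask has been built.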
buf517 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_104, c1_18], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf496, primals_331, buf517, 147456, grid=grid(147456), stream=stream0)
del buf496
del primals_331
buf518 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_101, c2_17], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf482, primals_322, buf518, 147456, grid=grid(147456), stream=stream0)
del buf482
del primals_322
buf519 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_98, c1_17], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf469, primals_313, buf519, 147456, grid=grid(147456), stream=stream0)
del buf469
del primals_313
buf520 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_95, c2_16], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf455, primals_304, buf520, 147456, grid=grid(147456), stream=stream0)
del buf455
del primals_304
buf521 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_92, c1_16], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf442, primals_295, buf521, 147456, grid=grid(147456), stream=stream0)
del buf442
del primals_295
buf522 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_89, c2_15], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf423, primals_283, buf522, 147456, grid=grid(147456), stream=stream0)
del buf423
del primals_283
buf523 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_86, c1_15], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf410, primals_274, buf523, 147456, grid=grid(147456), stream=stream0)
del buf410
del primals_274
buf524 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_83, c2_14], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf396, primals_265, buf524, 147456, grid=grid(147456), stream=stream0)
del buf396
del primals_265
buf525 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_80, c1_14], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf383, primals_256, buf525, 147456, grid=grid(147456), stream=stream0)
del buf383
del primals_256
buf526 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_77, c2_13], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf369, primals_247, buf526, 147456, grid=grid(147456), stream=stream0)
del buf369
del primals_247
buf527 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_74, c1_13], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf356, primals_238, buf527, 147456, grid=grid(147456), stream=stream0)
del buf356
del primals_238
buf528 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_71, c2_12], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf337, primals_226, buf528, 147456, grid=grid(147456), stream=stream0)
del buf337
del primals_226
buf529 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_68, c1_12], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf324, primals_217, buf529, 147456, grid=grid(147456), stream=stream0)
del buf324
del primals_217
buf530 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_65, c2_11], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf310, primals_208, buf530, 147456, grid=grid(147456), stream=stream0)
del buf310
del primals_208
buf531 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_62, c1_11], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf297, primals_199, buf531, 147456, grid=grid(147456), stream=stream0)
del buf297
del primals_199
buf532 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_59, c2_10], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf283, primals_190, buf532, 147456, grid=grid(147456), stream=stream0)
del buf283
del primals_190
buf533 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_56, c1_10], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf270, primals_181, buf533, 147456, grid=grid(147456), stream=stream0)
del buf270
del primals_181
buf534 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_53, c2_9], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf251, primals_169, buf534, 147456, grid=grid(147456), stream=stream0)
del buf251
del primals_169
buf535 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_50, c1_9], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf238, primals_160, buf535, 147456, grid=grid(147456), stream=stream0)
del buf238
del primals_160
buf536 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_47, c2_8], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf224, primals_151, buf536, 147456, grid=grid(147456), stream=stream0)
del buf224
del primals_151
buf537 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_44, c1_8], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf211, primals_142, buf537, 147456, grid=grid(147456), stream=stream0)
del buf211
del primals_142
buf538 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_41, c2_7], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf197, primals_133, buf538, 147456, grid=grid(147456), stream=stream0)
del buf197
del primals_133
buf539 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_38, c1_7], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf184, primals_124, buf539, 147456, grid=grid(147456), stream=stream0)
del buf184
del primals_124
buf540 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_35, c2_6], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf165, primals_112, buf540, 147456, grid=grid(147456), stream=stream0)
del buf165
del primals_112
buf541 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_32, c1_6], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf152, primals_103, buf541, 147456, grid=grid(147456), stream=stream0)
del buf152
del primals_103
buf542 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_29, c2_5], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf138, primals_94, buf542, 147456, grid=grid(147456), stream=stream0)
del buf138
del primals_94
buf543 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_26, c1_5], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf125, primals_85, buf543, 147456, grid=grid(147456), stream=stream0)
del buf125
del primals_85
buf544 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_23, c2_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf111, primals_76, buf544, 147456, grid=grid(147456), stream=stream0)
del buf111
del primals_76
buf545 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_20, c1_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf98, primals_67, buf545, 147456, grid=grid(147456), stream=stream0)
del buf98
del primals_67
buf546 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_17, c2_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf79, primals_55, buf546, 147456, grid=grid(147456), stream=stream0)
del buf79
del primals_55
buf547 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_14, c1_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf66, primals_46, buf547, 147456, grid=grid(147456), stream=stream0)
del buf66
del primals_46
buf548 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_11, c2_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf52, primals_37, buf548, 147456, grid=grid(147456), stream=stream0)
del buf52
del primals_37
buf549 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_8, c1_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf39, primals_28, buf549, 147456, grid=grid(147456), stream=stream0)
del buf39
del primals_28
buf550 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5, c2_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf25, primals_19, buf550, 147456, grid=grid(147456), stream=stream0)
del buf25
del primals_19
buf551 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2, c1_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9.run(buf12, primals_10, buf551, 147456, grid=grid(147456), stream=stream0)
del buf12
del primals_10
return (buf515, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33, buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73, buf78, buf83, buf87, buf92, buf97, buf100, buf105, buf110, buf114, buf119, buf124, buf127, buf132, buf137, buf141, buf146, buf151, buf154, buf159, buf164, buf169, buf173, buf178, buf183, buf186, buf191, buf196, buf200, buf205, buf210, buf213, buf218, buf223, buf227, buf232, buf237, buf240, buf245, buf250, buf255, buf259, buf264, buf269, buf272, buf277, buf282, buf286, buf291, buf296, buf299, buf304, buf309, buf313, buf318, buf323, buf326, buf331, buf336, buf341, buf345, buf350, buf355, buf358, buf363, buf368, buf372, buf377, buf382, buf385, buf390, buf395, buf399, buf404, buf409, buf412, buf417, buf422, buf427, buf431, buf436, buf441, buf444, buf449, buf454, buf458, buf463, buf468, buf471, buf476, buf481, buf485, buf490, buf495, buf498, buf503, buf508, buf513, primals_1, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, primals_14, primals_15, primals_17, primals_18, primals_20, primals_21, primals_23, primals_24, primals_26, primals_27, primals_29, primals_30, primals_32, primals_33, primals_35, primals_36, primals_38, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, primals_50, primals_51, primals_53, primals_54, primals_56, primals_57, primals_59, primals_60, primals_62, primals_63, primals_65, primals_66, primals_68, primals_69, primals_71, primals_72, primals_74, primals_75, primals_77, primals_78, primals_80, primals_81, primals_83, primals_84, primals_86, primals_87, primals_89, primals_90, primals_92, primals_93, primals_95, primals_96, primals_98, primals_99, primals_101, primals_102, primals_104, primals_105, primals_107, primals_108, primals_110, primals_111, primals_113, primals_114, primals_116, primals_117, primals_119, primals_120, primals_122, primals_123, primals_125, primals_126, primals_128, primals_129, primals_131, primals_132, primals_134, primals_135, primals_137, primals_138, primals_140, primals_141, primals_143, primals_144, primals_146, primals_147, primals_149, primals_150, primals_152, primals_153, primals_155, primals_156, primals_158, primals_159, primals_161, primals_162, primals_164, primals_165, primals_167, primals_168, primals_170, primals_171, primals_173, primals_174, primals_176, primals_177, primals_179, primals_180, primals_182, primals_183, primals_185, primals_186, primals_188, primals_189, primals_191, primals_192, primals_194, primals_195, primals_197, primals_198, primals_200, primals_201, primals_203, primals_204, primals_206, primals_207, primals_209, primals_210, primals_212, primals_213, primals_215, primals_216, primals_218, primals_219, primals_221, primals_222, primals_224, primals_225, primals_227, primals_228, primals_230, primals_231, primals_233, primals_234, primals_236, primals_237, primals_239, primals_240, primals_242, primals_243, primals_245, primals_246, primals_248, primals_249, primals_251, primals_252, primals_254, primals_255, primals_257, primals_258, primals_260, primals_261, primals_263, primals_264, primals_266, primals_267, primals_269, primals_270, primals_272, primals_273, primals_275, primals_276, primals_278, primals_279, primals_281, primals_282, primals_284, primals_285, primals_287, primals_288, primals_290, primals_291, primals_293, primals_294, primals_296, primals_297, primals_299, primals_300, primals_302, primals_303, primals_305, primals_306, primals_308, primals_309, primals_311, primals_312, 
primals_314, primals_315, primals_317, primals_318, primals_320, primals_321, primals_323, primals_324, primals_326, primals_327, primals_329, primals_330, primals_332, primals_333, primals_335, primals_336, primals_338, primals_339, primals_341, primals_342, buf0, buf1, buf3, buf5, buf6, buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19, buf21, buf23, buf24, buf26, buf27, buf28, buf30, buf32, buf33, buf35, buf37, buf38, buf40, buf41, buf43, buf45, buf46, buf48, buf50, buf51, buf53, buf54, buf55, buf57, buf59, buf60, buf62, buf64, buf65, buf67, buf68, buf70, buf72, buf73, buf75, buf77, buf78, buf80, buf82, buf83, buf85, buf86, buf87, buf89, buf91, buf92, buf94, buf96, buf97, buf99, buf100, buf102, buf104, buf105, buf107, buf109, buf110, buf112, buf113, buf114, buf116, buf118, buf119, buf121, buf123, buf124, buf126, buf127, buf129, buf131, buf132, buf134, buf136, buf137, buf139, buf140, buf141, buf143, buf145, buf146, buf148, buf150, buf151, buf153, buf154, buf156, buf158, buf159, buf161, buf163, buf164, buf166, buf168, buf169, buf171, buf172, buf173, buf175, buf177, buf178, buf180, buf182, buf183, buf185, buf186, buf188, buf190, buf191, buf193, buf195, buf196, buf198, buf199, buf200, buf202, buf204, buf205, buf207, buf209, buf210, buf212, buf213, buf215, buf217, buf218, buf220, buf222, buf223, buf225, buf226, buf227, buf229, buf231, buf232, buf234, buf236, buf237, buf239, buf240, buf242, buf244, buf245, buf247, buf249, buf250, buf252, buf254, buf255, buf257, buf258, buf259, buf261, buf263, buf264, buf266, buf268, buf269, buf271, buf272, buf274, buf276, buf277, buf279, buf281, buf282, buf284, buf285, buf286, buf288, buf290, buf291, buf293, buf295, buf296, buf298, buf299, buf301, buf303, buf304, buf306, buf308, buf309, buf311, buf312, buf313, buf315, buf317, buf318, buf320, buf322, buf323, buf325, buf326, buf328, buf330, buf331, buf333, buf335, buf336, buf338, buf340, buf341, buf343, buf344, buf345, buf347, buf349, buf350, buf352, buf354, buf355, buf357, buf358, buf360, buf362, buf363, buf365, buf367, buf368, buf370, buf371, buf372, buf374, buf376, buf377, buf379, buf381, buf382, buf384, buf385, buf387, buf389, buf390, buf392, buf394, buf395, buf397, buf398, buf399, buf401, buf403, buf404, buf406, buf408, buf409, buf411, buf412, buf414, buf416, buf417, buf419, buf421, buf422, buf424, buf426, buf427, buf429, buf430, buf431, buf433, buf435, buf436, buf438, buf440, buf441, buf443, buf444, buf446, buf448, buf449, buf451, buf453, buf454, buf456, buf457, buf458, buf460, buf462, buf463, buf465, buf467, buf468, buf470, buf471, buf473, buf475, buf476, buf478, buf480, buf481, buf483, buf484, buf485, buf487, buf489, buf490, buf492, buf494, buf495, buf497, buf498, buf500, buf502, buf503, buf505, buf507, buf508, buf510, buf512, buf513, buf516, buf517, buf518, buf519, buf520, buf521, buf522, buf523, buf524, buf525, buf526, buf527, buf528, buf529, buf530, buf531, buf532, buf533, buf534, buf535, buf536, buf537, buf538, buf539, buf540, buf541, buf542, buf543, buf544, buf545, buf546, buf547, buf548, buf549, buf550, buf551, )
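# Note (added): the first element of the returned tuple (buf515) is the
# forward output; the remaining buffers and primals appear to be saved as
# context for the autograd backward pass.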
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 18, 64, 64), (73728, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((18, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_73 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_74 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_75 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_76 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_77 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_78 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_79 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_80 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_81 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_82 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_83 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_84 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_85 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_86 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_87 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_88 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_89 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_90 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_91 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_92 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_93 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_94 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_95 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_96 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_97 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_98 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_99 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_100 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_101 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_102 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_103 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_104 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_105 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_106 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_107 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_108 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_109 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_110 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_111 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_112 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_113 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_114 = rand_strided((18, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_115 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_116 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_117 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_118 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_119 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_120 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_121 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_122 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_123 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_124 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_125 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_126 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_127 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_128 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_129 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_130 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_131 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_132 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_133 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_134 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_135 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_136 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_137 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_138 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_139 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_140 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_141 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_142 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_143 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_144 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_145 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_146 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_147 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_148 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_149 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_150 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_151 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_152 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_153 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_154 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_155 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_156 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_157 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_158 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_159 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_160 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_161 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_162 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_163 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_164 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_165 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_166 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_167 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_168 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_169 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_170 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_171 = rand_strided((18, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_172 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_173 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_174 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_175 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_176 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_177 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_178 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_179 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_180 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_181 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_182 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_183 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_184 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_185 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_186 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_187 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_188 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_189 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_190 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_191 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_192 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_193 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_194 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_195 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_196 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_197 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_198 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_199 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_200 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_201 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_202 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_203 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_204 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_205 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_206 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_207 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_208 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_209 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_210 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_211 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_212 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_213 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_214 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_215 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_216 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_217 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_218 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_219 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_220 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_221 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_222 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_223 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_224 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_225 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_226 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_227 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_228 = rand_strided((18, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_229 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_230 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_231 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_232 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_233 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_234 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_235 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_236 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_237 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_238 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_239 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_240 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_241 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_242 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_243 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_244 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_245 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_246 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_247 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_248 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_249 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_250 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_251 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_252 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_253 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_254 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_255 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_256 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_257 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_258 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_259 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_260 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_261 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_262 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_263 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_264 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_265 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_266 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_267 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_268 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_269 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_270 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_271 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_272 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_273 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_274 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_275 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_276 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_277 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_278 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_279 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_280 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_281 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_282 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_283 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_284 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_285 = rand_strided((18, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_286 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_287 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_288 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_289 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_290 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_291 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_292 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_293 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_294 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_295 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_296 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_297 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_298 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_299 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_300 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_301 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_302 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_303 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_304 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_305 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_306 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_307 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_308 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_309 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_310 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_311 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_312 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_313 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_314 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_315 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_316 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_317 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_318 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_319 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_320 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_321 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_322 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_323 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_324 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_325 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_326 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_327 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_328 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_329 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_330 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_331 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_332 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_333 = rand_strided((18, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_334 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_335 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_336 = rand_strided((18, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_337 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_338 = rand_strided((9, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_339 = rand_strided((9, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_340 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_341 = rand_strided((18, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_342 = rand_strided((18, 18, 1, 1), (18, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_343 = rand_strided((18, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185, primals_186, primals_187, primals_188, primals_189, primals_190, primals_191, primals_192, primals_193, primals_194, primals_195, primals_196, primals_197, primals_198, primals_199, primals_200, primals_201, primals_202, primals_203, primals_204, primals_205, primals_206, primals_207, primals_208, primals_209, primals_210, primals_211, primals_212, primals_213, primals_214, primals_215, primals_216, primals_217, primals_218, primals_219, primals_220, primals_221, primals_222, primals_223, primals_224, primals_225, primals_226, primals_227, primals_228, primals_229, primals_230, primals_231, primals_232, primals_233, primals_234, primals_235, primals_236, primals_237, primals_238, primals_239, primals_240, primals_241, primals_242, primals_243, primals_244, primals_245, primals_246, primals_247, primals_248, primals_249, primals_250, primals_251, primals_252, primals_253, primals_254, primals_255, primals_256, primals_257, primals_258, primals_259, primals_260, primals_261, primals_262, primals_263, primals_264, primals_265, primals_266, primals_267, primals_268, primals_269, primals_270, primals_271, primals_272, primals_273, primals_274, primals_275, primals_276, primals_277, primals_278, primals_279, primals_280, 
primals_281, primals_282, primals_283, primals_284, primals_285, primals_286, primals_287, primals_288, primals_289, primals_290, primals_291, primals_292, primals_293, primals_294, primals_295, primals_296, primals_297, primals_298, primals_299, primals_300, primals_301, primals_302, primals_303, primals_304, primals_305, primals_306, primals_307, primals_308, primals_309, primals_310, primals_311, primals_312, primals_313, primals_314, primals_315, primals_316, primals_317, primals_318, primals_319, primals_320, primals_321, primals_322, primals_323, primals_324, primals_325, primals_326, primals_327, primals_328, primals_329, primals_330, primals_331, primals_332, primals_333, primals_334, primals_335, primals_336, primals_337, primals_338, primals_339, primals_340, primals_341, primals_342, primals_343])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, groups=3):
super(ConvBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=1, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
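# Editor's note: ConvBlock factorises a dense 3x3 convolution into a grouped
# 1x1, a 3x3 depthwise, and a 1x1 pointwise convolution, each weight-normed.
# Rough parameter count for C in/out channels and G groups: C*C/G + 9*C + C*C,
# versus 9*C*C for the dense 3x3. torch.nn.utils.weight_norm is the hook-based
# API; newer PyTorch releases prefer torch.nn.utils.parametrizations.weight_norm.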
class ConvBlockD(nn.Module):
def __init__(self, in_channels, out_channels, groups=3, ker_size=2):
super(ConvBlockD, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=ker_size, dilation=ker_size, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
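# Editor's note: ConvBlockD swaps in a dilated depthwise 3x3; with
# padding=ker_size and dilation=ker_size the spatial size is preserved
# (out = in + 2*pad - dilation*(k-1) for k=3) while the tap pattern's
# receptive field grows to 2*ker_size + 1.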
class MIRB1(nn.Module):
def __init__(self, args):
super(MIRB1, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlock(args.n_feats, self.c_out)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlock(args.n_feats, self.c_out)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlock(args.n_feats, self.c_out)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
res = x
c1_1 = self.lrelu(self.conv3_1(res))
c2_1 = self.lrelu(self.convd_1(res))
c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1)))
c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1)))
c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1)))
c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1)))
out = self.conv_last(torch.cat([c1_4, c2_4], 1))
out = out + x
return out
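# Editor's note: MIRB1 runs two parallel half-width branches (conv3_* and
# convd_*), concatenates and re-splits them three times, projects back with a
# 1x1 conv_last, and closes with a residual add. MIRB2 and MIRB3 below differ
# only in using dilated ConvBlockD branches (ker_size=2 and 3 respectively).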
class MIRB2(nn.Module):
def __init__(self, args):
super(MIRB2, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
res = x
c1_1 = self.lrelu(self.conv3_1(res))
c2_1 = self.lrelu(self.convd_1(res))
c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1)))
c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1)))
c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1)))
c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1)))
out = self.conv_last(torch.cat([c1_4, c2_4], 1))
out = out + x
return out
class MIRB3(nn.Module):
def __init__(self, args):
super(MIRB3, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
res = x
c1_1 = self.lrelu(self.conv3_1(res))
c2_1 = self.lrelu(self.convd_1(res))
c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1)))
c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1)))
c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1)))
c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1)))
out = self.conv_last(torch.cat([c1_4, c2_4], 1))
out = out + x
return out
class MMFB(nn.Module):
def __init__(self, args):
super(MMFB, self).__init__()
        def wn(x):
            return torch.nn.utils.weight_norm(x)  # kept for symmetry with the MIRB blocks; unused in MMFB
self.bs1 = MIRB1(args)
self.bs11 = MIRB1(args)
self.bs2 = MIRB2(args)
self.bs22 = MIRB2(args)
self.bs3 = MIRB3(args)
self.bs33 = MIRB3(args)
def forward(self, x):
res = x
res = self.bs1(res)
res = self.bs11(res)
res = self.bs2(res)
res = self.bs22(res)
res = self.bs3(res)
res = self.bs33(res)
out = res + x
return out
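# Editor's note: a minimal smoke-test sketch (not part of the original file);
# it mirrors the get_init_inputs()/get_inputs() plumbing below.
def _mmfb_smoke_test():
    model = MMFB(_mock_config(n_feats=18))
    x = torch.rand(4, 18, 64, 64)
    assert model(x).shape == x.shape  # MMFB is shape-preserving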
def get_inputs():
return [torch.rand([4, 18, 64, 64])]
def get_init_inputs():
return [[], {'args': _mock_config(n_feats=18)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tl.store(out_ptr0 + x0, tmp17, xmask)
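# Editor's note: the kernels below are Inductor-generated; the sketches added
# between them are editorial PyTorch equivalents for readability, not part of
# the compiled module. Kernel 0 computes the per-output-channel L2 norm of the
# grouped 1x1 weight's direction tensor v (shape (18, 6, 1, 1)); roughly:
def _ref_weight_norm_g0(v):
    # one norm per output channel, reduced over the remaining dims
    return v.flatten(1).pow(2).sum(dim=1).sqrt().view(-1, 1, 1, 1)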
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 6
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
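# Editor's note (sketch): kernel 1 finishes the weight_norm reparameterisation
# for the grouped 1x1 conv, w = v * (g / ||v||), with g and the norm from
# kernel 0 broadcast along each 6-element row of v.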
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
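# Editor's note (sketch): kernel 2 is the post-convolution bias add for a
# (4, 18, 64, 64) activation, roughly `out += bias.view(1, -1, 1, 1)`;
# x1 = xindex // 4096 % 18 picks each element's channel.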
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 18
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 9 * x0), tmp9, rmask & xmask)
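# Editor's note (sketch): kernel 3 fuses both weight_norm steps for the 3x3
# depthwise weight (18, 1, 3, 3) into one persistent reduction: it computes
# the per-channel norm over the 9 taps and writes both the norm and the
# normalised weight v * (g / ||v||) in a single launch.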
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 9
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)
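# Editor's note (sketch): kernel 4 is the same fused norm-and-normalise as
# kernel 3, applied to the 1x1 pointwise weight (9, 18, 1, 1), reducing over
# the 18 input channels of each of the 9 output rows.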
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 18
x0 = xindex % 4096
x2 = xindex // 73728
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 36864 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tl.full([1], 18, tl.int64)
tmp18 = tl.load(in_ptr2 + (x0 + 4096 * (-9 + x1) + 36864 * x2), tmp15,
other=0.0)
    tmp19 = tl.load(in_ptr3 + (-9 + x1), tmp15, eviction_policy='evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tmp20 > tmp8
tmp22 = tmp20 * tmp10
tmp23 = tl.where(tmp21, tmp20, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tl.store(out_ptr0 + x3, tmp26, None)
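# Editor's note: a hedged PyTorch equivalent of kernel 5, assuming conv
# outputs c1, c2 of shape (4, 9, 64, 64) with biases b1, b2; it fuses the
# bias add, LeakyReLU(0.2) and channel concat from the MIRB forward pass:
#   out = torch.cat([torch.nn.functional.leaky_relu(c1 + b1.view(1, -1, 1, 1), 0.2),
#                    torch.nn.functional.leaky_relu(c2 + b2.view(1, -1, 1, 1), 0.2)], dim=1)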
@triton.jit
def triton_per_fused__weight_norm_interface_6(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 18
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)
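# Editor's note (sketch): kernel 6 repeats the fused weight_norm reduction for
# the 18x18 conv_last weight, one 18-element row reduction per output channel.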
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, None)
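# Editor's note (sketch): kernel 7 fuses the conv_last bias add with the
# residual `out = out + x` that closes each MIRB block:
# in_out += bias.view(1, -1, 1, 1) + residual.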
@triton.jit
def triton_poi_fused_add_convolution_8(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp5 = tl.load(in_ptr2 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, None)
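# Editor's note (sketch): kernel 8 is kernel 7 with one extra operand; it adds
# the bias and two residual tensors in one pass, covering the point where the
# MMFB skip `res + x` coincides with the last MIRB's own residual add.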
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 9
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, None)
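# Editor's note (sketch): kernel 9 recomputes bias + LeakyReLU for a 9-channel
# activation and stores only the boolean mask `lrelu(x) > 0`, which is what
# the LeakyReLU backward pass needs.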
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121, primals_122,
primals_123, primals_124, primals_125, primals_126, primals_127,
primals_128, primals_129, primals_130, primals_131, primals_132,
primals_133, primals_134, primals_135, primals_136, primals_137,
primals_138, primals_139, primals_140, primals_141, primals_142,
primals_143, primals_144, primals_145, primals_146, primals_147,
primals_148, primals_149, primals_150, primals_151, primals_152,
primals_153, primals_154, primals_155, primals_156, primals_157,
primals_158, primals_159, primals_160, primals_161, primals_162,
primals_163, primals_164, primals_165, primals_166, primals_167,
primals_168, primals_169, primals_170, primals_171, primals_172,
primals_173, primals_174, primals_175, primals_176, primals_177,
primals_178, primals_179, primals_180, primals_181, primals_182,
primals_183, primals_184, primals_185, primals_186, primals_187,
primals_188, primals_189, primals_190, primals_191, primals_192,
primals_193, primals_194, primals_195, primals_196, primals_197,
primals_198, primals_199, primals_200, primals_201, primals_202,
primals_203, primals_204, primals_205, primals_206, primals_207,
primals_208, primals_209, primals_210, primals_211, primals_212,
primals_213, primals_214, primals_215, primals_216, primals_217,
primals_218, primals_219, primals_220, primals_221, primals_222,
primals_223, primals_224, primals_225, primals_226, primals_227,
primals_228, primals_229, primals_230, primals_231, primals_232,
primals_233, primals_234, primals_235, primals_236, primals_237,
primals_238, primals_239, primals_240, primals_241, primals_242,
primals_243, primals_244, primals_245, primals_246, primals_247,
primals_248, primals_249, primals_250, primals_251, primals_252,
primals_253, primals_254, primals_255, primals_256, primals_257,
primals_258, primals_259, primals_260, primals_261, primals_262,
primals_263, primals_264, primals_265, primals_266, primals_267,
primals_268, primals_269, primals_270, primals_271, primals_272,
primals_273, primals_274, primals_275, primals_276, primals_277,
primals_278, primals_279, primals_280, primals_281, primals_282,
primals_283, primals_284, primals_285, primals_286, primals_287,
primals_288, primals_289, primals_290, primals_291, primals_292,
primals_293, primals_294, primals_295, primals_296, primals_297,
primals_298, primals_299, primals_300, primals_301, primals_302,
primals_303, primals_304, primals_305, primals_306, primals_307,
primals_308, primals_309, primals_310, primals_311, primals_312,
primals_313, primals_314, primals_315, primals_316, primals_317,
primals_318, primals_319, primals_320, primals_321, primals_322,
primals_323, primals_324, primals_325, primals_326, primals_327,
primals_328, primals_329, primals_330, primals_331, primals_332,
primals_333, primals_334, primals_335, primals_336, primals_337,
primals_338, primals_339, primals_340, primals_341, primals_342,
primals_343) = args
args.clear()
assert_size_stride(primals_1, (4, 18, 64, 64), (73728, 4096, 64, 1))
assert_size_stride(primals_2, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_4, (18,), (1,))
assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (18,), (1,))
assert_size_stride(primals_8, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_10, (9,), (1,))
assert_size_stride(primals_11, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_12, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_13, (18,), (1,))
assert_size_stride(primals_14, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_15, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_16, (18,), (1,))
assert_size_stride(primals_17, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_18, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_19, (9,), (1,))
assert_size_stride(primals_20, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_21, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_22, (18,), (1,))
assert_size_stride(primals_23, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_24, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_25, (18,), (1,))
assert_size_stride(primals_26, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_27, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_28, (9,), (1,))
assert_size_stride(primals_29, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_30, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_31, (18,), (1,))
assert_size_stride(primals_32, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_33, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_34, (18,), (1,))
assert_size_stride(primals_35, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_36, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_37, (9,), (1,))
assert_size_stride(primals_38, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_39, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_40, (18,), (1,))
assert_size_stride(primals_41, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_42, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_43, (18,), (1,))
assert_size_stride(primals_44, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_45, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_46, (9,), (1,))
assert_size_stride(primals_47, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_48, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_49, (18,), (1,))
assert_size_stride(primals_50, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_51, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_52, (18,), (1,))
assert_size_stride(primals_53, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_54, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_55, (9,), (1,))
assert_size_stride(primals_56, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_57, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_58, (18,), (1,))
assert_size_stride(primals_59, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_60, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_61, (18,), (1,))
assert_size_stride(primals_62, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_63, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_64, (18,), (1,))
assert_size_stride(primals_65, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_66, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_67, (9,), (1,))
assert_size_stride(primals_68, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_69, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_70, (18,), (1,))
assert_size_stride(primals_71, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_72, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_73, (18,), (1,))
assert_size_stride(primals_74, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_75, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_76, (9,), (1,))
assert_size_stride(primals_77, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_78, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_79, (18,), (1,))
assert_size_stride(primals_80, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_81, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_82, (18,), (1,))
assert_size_stride(primals_83, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_84, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_85, (9,), (1,))
assert_size_stride(primals_86, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_87, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_88, (18,), (1,))
assert_size_stride(primals_89, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_90, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_91, (18,), (1,))
assert_size_stride(primals_92, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_93, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_94, (9,), (1,))
assert_size_stride(primals_95, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_96, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_97, (18,), (1,))
assert_size_stride(primals_98, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_99, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_100, (18,), (1,))
assert_size_stride(primals_101, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_102, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_103, (9,), (1,))
assert_size_stride(primals_104, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_105, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_106, (18,), (1,))
assert_size_stride(primals_107, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_108, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_109, (18,), (1,))
assert_size_stride(primals_110, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_111, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_112, (9,), (1,))
assert_size_stride(primals_113, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_114, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_115, (18,), (1,))
assert_size_stride(primals_116, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_117, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_118, (18,), (1,))
assert_size_stride(primals_119, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_120, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_121, (18,), (1,))
assert_size_stride(primals_122, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_123, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_124, (9,), (1,))
assert_size_stride(primals_125, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_126, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_127, (18,), (1,))
assert_size_stride(primals_128, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_129, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_130, (18,), (1,))
assert_size_stride(primals_131, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_132, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_133, (9,), (1,))
assert_size_stride(primals_134, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_135, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_136, (18,), (1,))
assert_size_stride(primals_137, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_138, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_139, (18,), (1,))
assert_size_stride(primals_140, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_141, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_142, (9,), (1,))
assert_size_stride(primals_143, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_144, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_145, (18,), (1,))
assert_size_stride(primals_146, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_147, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_148, (18,), (1,))
assert_size_stride(primals_149, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_150, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_151, (9,), (1,))
assert_size_stride(primals_152, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_153, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_154, (18,), (1,))
assert_size_stride(primals_155, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_156, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_157, (18,), (1,))
assert_size_stride(primals_158, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_159, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_160, (9,), (1,))
assert_size_stride(primals_161, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_162, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_163, (18,), (1,))
assert_size_stride(primals_164, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_165, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_166, (18,), (1,))
assert_size_stride(primals_167, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_168, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_169, (9,), (1,))
assert_size_stride(primals_170, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_171, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_172, (18,), (1,))
assert_size_stride(primals_173, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_174, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_175, (18,), (1,))
assert_size_stride(primals_176, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_177, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_178, (18,), (1,))
assert_size_stride(primals_179, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_180, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_181, (9,), (1,))
assert_size_stride(primals_182, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_183, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_184, (18,), (1,))
assert_size_stride(primals_185, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_186, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_187, (18,), (1,))
assert_size_stride(primals_188, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_189, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_190, (9,), (1,))
assert_size_stride(primals_191, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_192, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_193, (18,), (1,))
assert_size_stride(primals_194, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_195, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_196, (18,), (1,))
assert_size_stride(primals_197, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_198, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_199, (9,), (1,))
assert_size_stride(primals_200, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_201, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_202, (18,), (1,))
assert_size_stride(primals_203, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_204, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_205, (18,), (1,))
assert_size_stride(primals_206, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_207, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_208, (9,), (1,))
assert_size_stride(primals_209, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_210, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_211, (18,), (1,))
assert_size_stride(primals_212, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_213, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_214, (18,), (1,))
assert_size_stride(primals_215, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_216, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_217, (9,), (1,))
assert_size_stride(primals_218, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_219, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_220, (18,), (1,))
assert_size_stride(primals_221, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_222, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_223, (18,), (1,))
assert_size_stride(primals_224, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_225, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_226, (9,), (1,))
assert_size_stride(primals_227, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_228, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_229, (18,), (1,))
assert_size_stride(primals_230, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_231, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_232, (18,), (1,))
assert_size_stride(primals_233, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_234, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_235, (18,), (1,))
assert_size_stride(primals_236, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_237, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_238, (9,), (1,))
assert_size_stride(primals_239, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_240, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_241, (18,), (1,))
assert_size_stride(primals_242, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_243, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_244, (18,), (1,))
assert_size_stride(primals_245, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_246, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_247, (9,), (1,))
assert_size_stride(primals_248, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_249, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_250, (18,), (1,))
assert_size_stride(primals_251, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_252, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_253, (18,), (1,))
assert_size_stride(primals_254, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_255, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_256, (9,), (1,))
assert_size_stride(primals_257, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_258, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_259, (18,), (1,))
assert_size_stride(primals_260, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_261, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_262, (18,), (1,))
assert_size_stride(primals_263, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_264, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_265, (9,), (1,))
assert_size_stride(primals_266, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_267, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_268, (18,), (1,))
assert_size_stride(primals_269, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_270, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_271, (18,), (1,))
assert_size_stride(primals_272, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_273, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_274, (9,), (1,))
assert_size_stride(primals_275, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_276, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_277, (18,), (1,))
assert_size_stride(primals_278, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_279, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_280, (18,), (1,))
assert_size_stride(primals_281, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_282, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_283, (9,), (1,))
assert_size_stride(primals_284, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_285, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_286, (18,), (1,))
assert_size_stride(primals_287, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_288, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_289, (18,), (1,))
assert_size_stride(primals_290, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_291, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_292, (18,), (1,))
assert_size_stride(primals_293, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_294, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_295, (9,), (1,))
assert_size_stride(primals_296, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_297, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_298, (18,), (1,))
assert_size_stride(primals_299, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_300, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_301, (18,), (1,))
assert_size_stride(primals_302, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_303, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_304, (9,), (1,))
assert_size_stride(primals_305, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_306, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_307, (18,), (1,))
assert_size_stride(primals_308, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_309, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_310, (18,), (1,))
assert_size_stride(primals_311, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_312, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_313, (9,), (1,))
assert_size_stride(primals_314, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_315, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_316, (18,), (1,))
assert_size_stride(primals_317, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_318, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_319, (18,), (1,))
assert_size_stride(primals_320, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_321, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_322, (9,), (1,))
assert_size_stride(primals_323, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_324, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_325, (18,), (1,))
assert_size_stride(primals_326, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_327, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_328, (18,), (1,))
assert_size_stride(primals_329, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_330, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_331, (9,), (1,))
assert_size_stride(primals_332, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_333, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_334, (18,), (1,))
assert_size_stride(primals_335, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_336, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_337, (18,), (1,))
assert_size_stride(primals_338, (9, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_339, (9, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_340, (9,), (1,))
assert_size_stride(primals_341, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_342, (18, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_343, (18,), (1,))
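    # Editor's note: the graph below repeats one schedule per ConvBlock /
    # ConvBlockD: kernels 0-1 (or the fused 3/4/6 variants) materialise the
    # weight_norm weights, extern_kernels.convolution runs the convolutions,
    # kernel 2 adds biases, kernel 5 fuses LeakyReLU with the channel concat,
    # and kernels 7-8 close each residual block.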
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_3, buf0,
18, XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_3,
primals_2, buf0, buf1, 108, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(primals_1, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(294912)](buf3, primals_4,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_4
        buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf4
buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf5, primals_6,
primals_5, buf6, 18, 9, XBLOCK=32, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_2[grid(294912)](buf8, primals_7,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf9 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf10 = reinterpret_tensor(buf9, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
buf11 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf10, primals_9,
primals_8, buf11, 9, 18, XBLOCK=1, num_warps=2, num_stages=1)
buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf13 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_12,
buf13, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_12,
primals_11, buf13, buf14, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf15 = extern_kernels.convolution(primals_1, buf14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf15, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf16 = buf15
del buf15
triton_poi_fused_convolution_2[grid(294912)](buf16, primals_13,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
        buf17 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf18 = reinterpret_tensor(buf17, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf17
buf19 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf18,
primals_15, primals_14, buf19, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf20 = extern_kernels.convolution(buf16, buf19, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf20, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_2[grid(294912)](buf21, primals_16,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_16
buf22 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf23 = reinterpret_tensor(buf22, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf22
buf24 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf23,
primals_18, primals_17, buf24, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf25 = extern_kernels.convolution(buf21, buf24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf26 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf12, primals_10, buf25,
primals_19, buf26, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf27 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_21,
buf27, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_21,
primals_20, buf27, buf28, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf29 = extern_kernels.convolution(buf26, buf28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf29, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf30 = buf29
del buf29
triton_poi_fused_convolution_2[grid(294912)](buf30, primals_22,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_22
        buf31 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf32 = reinterpret_tensor(buf31, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf31
buf33 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf32,
primals_24, primals_23, buf33, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf34 = extern_kernels.convolution(buf30, buf33, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf34, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf35 = buf34
del buf34
triton_poi_fused_convolution_2[grid(294912)](buf35, primals_25,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_25
buf36 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf37 = reinterpret_tensor(buf36, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf36
buf38 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf37,
primals_27, primals_26, buf38, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf39 = extern_kernels.convolution(buf35, buf38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf40 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_30,
buf40, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf41 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_30,
primals_29, buf40, buf41, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf42 = extern_kernels.convolution(buf26, buf41, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf42, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf43 = buf42
del buf42
triton_poi_fused_convolution_2[grid(294912)](buf43, primals_31,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_31
        buf44 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf45 = reinterpret_tensor(buf44, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf44
buf46 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf45,
primals_33, primals_32, buf46, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf47 = extern_kernels.convolution(buf43, buf46, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf47, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf48 = buf47
del buf47
triton_poi_fused_convolution_2[grid(294912)](buf48, primals_34,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_34
buf49 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf50 = reinterpret_tensor(buf49, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf49
buf51 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf50,
primals_36, primals_35, buf51, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf52 = extern_kernels.convolution(buf48, buf51, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf53 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf39, primals_28, buf52,
primals_37, buf53, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf54 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_39,
buf54, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf55 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_39,
primals_38, buf54, buf55, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf56 = extern_kernels.convolution(buf53, buf55, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf56, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf57 = buf56
del buf56
triton_poi_fused_convolution_2[grid(294912)](buf57, primals_40,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_40
        buf58 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf59 = reinterpret_tensor(buf58, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf58
buf60 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf59,
primals_42, primals_41, buf60, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf61 = extern_kernels.convolution(buf57, buf60, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf61, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf62 = buf61
del buf61
triton_poi_fused_convolution_2[grid(294912)](buf62, primals_43,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf63 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf64 = reinterpret_tensor(buf63, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf63
buf65 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf64,
primals_45, primals_44, buf65, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf66 = extern_kernels.convolution(buf62, buf65, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf67 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_48,
buf67, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf68 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_48,
primals_47, buf67, buf68, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf69 = extern_kernels.convolution(buf53, buf68, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf69, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf70 = buf69
del buf69
triton_poi_fused_convolution_2[grid(294912)](buf70, primals_49,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_49
        buf71 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf72 = reinterpret_tensor(buf71, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf71
buf73 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf72,
primals_51, primals_50, buf73, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf74 = extern_kernels.convolution(buf70, buf73, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf74, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf75 = buf74
del buf74
triton_poi_fused_convolution_2[grid(294912)](buf75, primals_52,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_52
buf76 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf77 = reinterpret_tensor(buf76, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf76
buf78 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf77,
primals_54, primals_53, buf78, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf79 = extern_kernels.convolution(buf75, buf78, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf80 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf66, primals_46, buf79,
primals_55, buf80, 294912, XBLOCK=512, num_warps=8, num_stages=1)
        buf81 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf82 = reinterpret_tensor(buf81, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf81
        buf83 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_6[grid(18)](buf82,
primals_57, primals_56, buf83, 18, 18, XBLOCK=32, num_warps=8,
num_stages=1)
buf84 = extern_kernels.convolution(buf80, buf83, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf85 = buf84
del buf84
triton_poi_fused_add_convolution_7[grid(294912)](buf85, primals_58,
primals_1, 294912, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_58
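        # buf85 = conv_last(cat(c1_4, c2_4)) + bias + primals_1: the residual
        # output of the first MIRB1 block, feeding the next block's branches.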
buf86 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_60,
buf86, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf87 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_60,
primals_59, buf86, buf87, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf88 = extern_kernels.convolution(buf85, buf87, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf88, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf89 = buf88
del buf88
triton_poi_fused_convolution_2[grid(294912)](buf89, primals_61,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_61
        buf90 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf91 = reinterpret_tensor(buf90, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf90
buf92 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf91,
primals_63, primals_62, buf92, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf93 = extern_kernels.convolution(buf89, buf92, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf93, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf94 = buf93
del buf93
triton_poi_fused_convolution_2[grid(294912)](buf94, primals_64,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_64
buf95 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf96 = reinterpret_tensor(buf95, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf95
buf97 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf96,
primals_66, primals_65, buf97, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf98 = extern_kernels.convolution(buf94, buf97, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf98, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf99 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_69,
buf99, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf100 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_69,
primals_68, buf99, buf100, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf101 = extern_kernels.convolution(buf85, buf100, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf101, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf102 = buf101
del buf101
triton_poi_fused_convolution_2[grid(294912)](buf102, primals_70,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_70
        buf103 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf104 = reinterpret_tensor(buf103, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf103
buf105 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf104,
primals_72, primals_71, buf105, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf106 = extern_kernels.convolution(buf102, buf105, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf106, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf107 = buf106
del buf106
triton_poi_fused_convolution_2[grid(294912)](buf107, primals_73,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_73
buf108 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf109 = reinterpret_tensor(buf108, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf108
        buf110 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(9)](buf109,
primals_75, primals_74, buf110, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf111 = extern_kernels.convolution(buf107, buf110, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf111, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf112 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf98, primals_67, buf111,
primals_76, buf112, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf113 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_78,
buf113, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf114 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_78,
primals_77, buf113, buf114, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf115 = extern_kernels.convolution(buf112, buf114, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf115, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf116 = buf115
del buf115
triton_poi_fused_convolution_2[grid(294912)](buf116, primals_79,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_79
        buf117 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32)
buf118 = reinterpret_tensor(buf117, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf117
buf119 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf118,
primals_81, primals_80, buf119, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf120 = extern_kernels.convolution(buf116, buf119, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf120, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf121 = buf120
del buf120
triton_poi_fused_convolution_2[grid(294912)](buf121, primals_82,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_82
buf122 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf123 = reinterpret_tensor(buf122, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf122
buf124 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf123,
primals_84, primals_83, buf124, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf125 = extern_kernels.convolution(buf121, buf124, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf125, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf126 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_87,
buf126, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf127 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_87,
primals_86, buf126, buf127, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf128 = extern_kernels.convolution(buf112, buf127, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf128, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf129 = buf128
del buf128
triton_poi_fused_convolution_2[grid(294912)](buf129, primals_88,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_88
buf130 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf131 = reinterpret_tensor(buf130, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf130
buf132 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf131,
primals_90, primals_89, buf132, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf133 = extern_kernels.convolution(buf129, buf132, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf133, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf134 = buf133
del buf133
triton_poi_fused_convolution_2[grid(294912)](buf134, primals_91,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_91
buf135 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf136 = reinterpret_tensor(buf135, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf135
buf137 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf136,
primals_93, primals_92, buf137, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf138 = extern_kernels.convolution(buf134, buf137, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf138, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf139 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf125, primals_85, buf138,
primals_94, buf139, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf140 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_96,
buf140, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf141 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_96,
primals_95, buf140, buf141, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf142 = extern_kernels.convolution(buf139, buf141, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf142, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf143 = buf142
del buf142
triton_poi_fused_convolution_2[grid(294912)](buf143, primals_97,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_97
buf144 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf145 = reinterpret_tensor(buf144, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf144
buf146 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf145,
primals_99, primals_98, buf146, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf147 = extern_kernels.convolution(buf143, buf146, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf147, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf148 = buf147
del buf147
triton_poi_fused_convolution_2[grid(294912)](buf148, primals_100,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_100
buf149 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf150 = reinterpret_tensor(buf149, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf149
buf151 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf150,
primals_102, primals_101, buf151, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf152 = extern_kernels.convolution(buf148, buf151, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf152, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf153 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_105,
buf153, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf154 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_105,
primals_104, buf153, buf154, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf155 = extern_kernels.convolution(buf139, buf154, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf155, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf156 = buf155
del buf155
triton_poi_fused_convolution_2[grid(294912)](buf156, primals_106,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_106
buf157 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf158 = reinterpret_tensor(buf157, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf157
buf159 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf158,
primals_108, primals_107, buf159, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf160 = extern_kernels.convolution(buf156, buf159, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf160, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf161 = buf160
del buf160
triton_poi_fused_convolution_2[grid(294912)](buf161, primals_109,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_109
buf162 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf163 = reinterpret_tensor(buf162, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf162
buf164 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf163,
primals_111, primals_110, buf164, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf165 = extern_kernels.convolution(buf161, buf164, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf166 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf152, primals_103, buf165,
primals_112, buf166, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf167 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf168 = reinterpret_tensor(buf167, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf167
buf169 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.
float32)
triton_per_fused__weight_norm_interface_6[grid(18)](buf168,
primals_114, primals_113, buf169, 18, 18, XBLOCK=32, num_warps=
8, num_stages=1)
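    # End-of-stage fusion: kernel 6 weight-norms a full (18, 18, 1, 1)
    # pointwise weight (groups=1 channel mixing) for the convolution that
    # closes this residual stage.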
buf170 = extern_kernels.convolution(buf166, buf169, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf170, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf171 = buf170
del buf170
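    # triton_poi_fused_add_convolution_7 adds the bias (primals_115) and the
    # stage input buf85 in place, i.e. buf171 = conv(buf166) + bias + buf85:
    # the residual connection for this stage.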
triton_poi_fused_add_convolution_7[grid(294912)](buf171,
primals_115, buf85, 294912, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_115
buf172 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_117,
buf172, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf173 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_117,
primals_116, buf172, buf173, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf174 = extern_kernels.convolution(buf171, buf173, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf174, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf175 = buf174
del buf174
triton_poi_fused_convolution_2[grid(294912)](buf175, primals_118,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_118
buf176 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf177 = reinterpret_tensor(buf176, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf176
buf178 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf177,
primals_120, primals_119, buf178, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf179 = extern_kernels.convolution(buf175, buf178, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf179, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf180 = buf179
del buf179
triton_poi_fused_convolution_2[grid(294912)](buf180, primals_121,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_121
buf181 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf182 = reinterpret_tensor(buf181, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf181
buf183 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf182,
primals_123, primals_122, buf183, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf184 = extern_kernels.convolution(buf180, buf183, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf184, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf185 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_126,
buf185, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf186 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_126,
primals_125, buf185, buf186, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf187 = extern_kernels.convolution(buf171, buf186, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf187, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf188 = buf187
del buf187
triton_poi_fused_convolution_2[grid(294912)](buf188, primals_127,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_127
buf189 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf190 = reinterpret_tensor(buf189, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf189
buf191 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf190,
primals_129, primals_128, buf191, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
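    # Second branch of this unit: its depthwise 3x3 conv uses dilation=(2, 2)
    # with matching padding=(2, 2), widening the receptive field while the
    # first branch (buf179 above) keeps dilation=(1, 1); the output stays
    # 64x64 either way, as the asserts confirm.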
buf192 = extern_kernels.convolution(buf188, buf191, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf192, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf193 = buf192
del buf192
triton_poi_fused_convolution_2[grid(294912)](buf193, primals_130,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_130
buf194 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf195 = reinterpret_tensor(buf194, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf194
buf196 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf195,
primals_132, primals_131, buf196, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf197 = extern_kernels.convolution(buf193, buf196, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf197, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf198 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf184, primals_124, buf197,
primals_133, buf198, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf199 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_135,
buf199, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf200 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_135,
primals_134, buf199, buf200, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf201 = extern_kernels.convolution(buf198, buf200, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf201, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf202 = buf201
del buf201
triton_poi_fused_convolution_2[grid(294912)](buf202, primals_136,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_136
buf203 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf204 = reinterpret_tensor(buf203, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf203
buf205 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf204,
primals_138, primals_137, buf205, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf206 = extern_kernels.convolution(buf202, buf205, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf206, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf207 = buf206
del buf206
triton_poi_fused_convolution_2[grid(294912)](buf207, primals_139,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_139
buf208 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf209 = reinterpret_tensor(buf208, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf208
buf210 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf209,
primals_141, primals_140, buf210, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf211 = extern_kernels.convolution(buf207, buf210, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf211, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf212 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_144,
buf212, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf213 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_144,
primals_143, buf212, buf213, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf214 = extern_kernels.convolution(buf198, buf213, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf214, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf215 = buf214
del buf214
triton_poi_fused_convolution_2[grid(294912)](buf215, primals_145,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_145
buf216 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf217 = reinterpret_tensor(buf216, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf216
buf218 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf217,
primals_147, primals_146, buf218, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf219 = extern_kernels.convolution(buf215, buf218, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf219, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf220 = buf219
del buf219
triton_poi_fused_convolution_2[grid(294912)](buf220, primals_148,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_148
buf221 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf222 = reinterpret_tensor(buf221, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf221
buf223 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf222,
primals_150, primals_149, buf223, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf224 = extern_kernels.convolution(buf220, buf223, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf224, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf225 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf211, primals_142, buf224,
primals_151, buf225, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf226 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_153,
buf226, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf227 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_153,
primals_152, buf226, buf227, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf228 = extern_kernels.convolution(buf225, buf227, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf228, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf229 = buf228
del buf228
triton_poi_fused_convolution_2[grid(294912)](buf229, primals_154,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_154
buf230 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf231 = reinterpret_tensor(buf230, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf230
buf232 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf231,
primals_156, primals_155, buf232, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf233 = extern_kernels.convolution(buf229, buf232, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf233, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf234 = buf233
del buf233
triton_poi_fused_convolution_2[grid(294912)](buf234, primals_157,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_157
buf235 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf236 = reinterpret_tensor(buf235, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf235
buf237 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf236,
primals_159, primals_158, buf237, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf238 = extern_kernels.convolution(buf234, buf237, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf238, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf239 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_162,
buf239, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf240 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_162,
primals_161, buf239, buf240, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf241 = extern_kernels.convolution(buf225, buf240, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf241, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf242 = buf241
del buf241
triton_poi_fused_convolution_2[grid(294912)](buf242, primals_163,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_163
buf243 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf244 = reinterpret_tensor(buf243, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf243
buf245 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf244,
primals_165, primals_164, buf245, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf246 = extern_kernels.convolution(buf242, buf245, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf246, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf247 = buf246
del buf246
triton_poi_fused_convolution_2[grid(294912)](buf247, primals_166,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_166
buf248 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf249 = reinterpret_tensor(buf248, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf248
buf250 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf249,
primals_168, primals_167, buf250, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf251 = extern_kernels.convolution(buf247, buf250, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf251, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf252 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf238, primals_160, buf251,
primals_169, buf252, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf253 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf254 = reinterpret_tensor(buf253, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf253
buf255 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.
float32)
triton_per_fused__weight_norm_interface_6[grid(18)](buf254,
primals_171, primals_170, buf255, 18, 18, XBLOCK=32, num_warps=
8, num_stages=1)
buf256 = extern_kernels.convolution(buf252, buf255, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf256, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf257 = buf256
del buf256
triton_poi_fused_add_convolution_7[grid(294912)](buf257,
primals_172, buf171, 294912, XBLOCK=1024, num_warps=4, num_stages=1
)
del primals_172
buf258 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_174,
buf258, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf259 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_174,
primals_173, buf258, buf259, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf260 = extern_kernels.convolution(buf257, buf259, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf260, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf261 = buf260
del buf260
triton_poi_fused_convolution_2[grid(294912)](buf261, primals_175,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_175
buf262 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf263 = reinterpret_tensor(buf262, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf262
buf264 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf263,
primals_177, primals_176, buf264, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf265 = extern_kernels.convolution(buf261, buf264, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf265, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf266 = buf265
del buf265
triton_poi_fused_convolution_2[grid(294912)](buf266, primals_178,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_178
buf267 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf268 = reinterpret_tensor(buf267, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf267
buf269 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf268,
primals_180, primals_179, buf269, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf270 = extern_kernels.convolution(buf266, buf269, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf270, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf271 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_183,
buf271, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf272 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_183,
primals_182, buf271, buf272, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf273 = extern_kernels.convolution(buf257, buf272, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf273, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf274 = buf273
del buf273
triton_poi_fused_convolution_2[grid(294912)](buf274, primals_184,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_184
buf275 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf276 = reinterpret_tensor(buf275, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf275
buf277 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf276,
primals_186, primals_185, buf277, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf278 = extern_kernels.convolution(buf274, buf277, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf278, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf279 = buf278
del buf278
triton_poi_fused_convolution_2[grid(294912)](buf279, primals_187,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_187
buf280 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf281 = reinterpret_tensor(buf280, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf280
buf282 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf281,
primals_189, primals_188, buf282, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf283 = extern_kernels.convolution(buf279, buf282, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf283, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf284 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf270, primals_181, buf283,
primals_190, buf284, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf285 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_192,
buf285, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf286 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_192,
primals_191, buf285, buf286, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf287 = extern_kernels.convolution(buf284, buf286, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf287, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf288 = buf287
del buf287
triton_poi_fused_convolution_2[grid(294912)](buf288, primals_193,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_193
buf289 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf290 = reinterpret_tensor(buf289, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf289
buf291 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf290,
primals_195, primals_194, buf291, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf292 = extern_kernels.convolution(buf288, buf291, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf292, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf293 = buf292
del buf292
triton_poi_fused_convolution_2[grid(294912)](buf293, primals_196,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_196
buf294 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf295 = reinterpret_tensor(buf294, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf294
buf296 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf295,
primals_198, primals_197, buf296, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf297 = extern_kernels.convolution(buf293, buf296, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf297, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf298 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_201,
buf298, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf299 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_201,
primals_200, buf298, buf299, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf300 = extern_kernels.convolution(buf284, buf299, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf300, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf301 = buf300
del buf300
triton_poi_fused_convolution_2[grid(294912)](buf301, primals_202,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_202
buf302 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf303 = reinterpret_tensor(buf302, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf302
buf304 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf303,
primals_204, primals_203, buf304, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf305 = extern_kernels.convolution(buf301, buf304, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf305, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf306 = buf305
del buf305
triton_poi_fused_convolution_2[grid(294912)](buf306, primals_205,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_205
buf307 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf308 = reinterpret_tensor(buf307, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf307
buf309 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf308,
primals_207, primals_206, buf309, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf310 = extern_kernels.convolution(buf306, buf309, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf310, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf311 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf297, primals_199, buf310,
primals_208, buf311, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf312 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_210,
buf312, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf313 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_210,
primals_209, buf312, buf313, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf314 = extern_kernels.convolution(buf311, buf313, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf314, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf315 = buf314
del buf314
triton_poi_fused_convolution_2[grid(294912)](buf315, primals_211,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_211
buf316 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf317 = reinterpret_tensor(buf316, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf316
buf318 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf317,
primals_213, primals_212, buf318, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf319 = extern_kernels.convolution(buf315, buf318, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf319, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf320 = buf319
del buf319
triton_poi_fused_convolution_2[grid(294912)](buf320, primals_214,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_214
buf321 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf322 = reinterpret_tensor(buf321, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf321
buf323 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf322,
primals_216, primals_215, buf323, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf324 = extern_kernels.convolution(buf320, buf323, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf324, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf325 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_219,
buf325, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf326 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_219,
primals_218, buf325, buf326, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf327 = extern_kernels.convolution(buf311, buf326, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf327, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf328 = buf327
del buf327
triton_poi_fused_convolution_2[grid(294912)](buf328, primals_220,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_220
buf329 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf330 = reinterpret_tensor(buf329, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf329
buf331 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf330,
primals_222, primals_221, buf331, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf332 = extern_kernels.convolution(buf328, buf331, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf332, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf333 = buf332
del buf332
triton_poi_fused_convolution_2[grid(294912)](buf333, primals_223,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_223
buf334 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf335 = reinterpret_tensor(buf334, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf334
buf336 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf335,
primals_225, primals_224, buf336, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf337 = extern_kernels.convolution(buf333, buf336, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf337, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf338 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf324, primals_217, buf337,
primals_226, buf338, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf339 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf340 = reinterpret_tensor(buf339, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf339
buf341 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.
float32)
triton_per_fused__weight_norm_interface_6[grid(18)](buf340,
primals_228, primals_227, buf341, 18, 18, XBLOCK=32, num_warps=
8, num_stages=1)
buf342 = extern_kernels.convolution(buf338, buf341, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf342, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf343 = buf342
del buf342
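    # Third residual add of this section: buf343 = conv(buf338) + bias +
    # buf257. The residual outputs chain buf171 -> buf257 -> buf343 ->
    # buf429, each serving as the input to the next group of dual-branch
    # units.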
triton_poi_fused_add_convolution_7[grid(294912)](buf343,
primals_229, buf257, 294912, XBLOCK=1024, num_warps=4, num_stages=1
)
del primals_229
buf344 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_231,
buf344, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf345 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_231,
primals_230, buf344, buf345, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf346 = extern_kernels.convolution(buf343, buf345, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf346, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf347 = buf346
del buf346
triton_poi_fused_convolution_2[grid(294912)](buf347, primals_232,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_232
buf348 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf349 = reinterpret_tensor(buf348, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf348
buf350 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf349,
primals_234, primals_233, buf350, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf351 = extern_kernels.convolution(buf347, buf350, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf351, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf352 = buf351
del buf351
triton_poi_fused_convolution_2[grid(294912)](buf352, primals_235,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_235
buf353 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf354 = reinterpret_tensor(buf353, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf353
buf355 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf354,
primals_237, primals_236, buf355, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf356 = extern_kernels.convolution(buf352, buf355, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf356, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf357 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_240,
buf357, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf358 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_240,
primals_239, buf357, buf358, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf359 = extern_kernels.convolution(buf343, buf358, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf359, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf360 = buf359
del buf359
triton_poi_fused_convolution_2[grid(294912)](buf360, primals_241,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_241
buf361 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf362 = reinterpret_tensor(buf361, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf361
buf363 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf362,
primals_243, primals_242, buf363, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
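    # In this and the remaining stages, the dilated branch steps up to
    # dilation=(3, 3) with padding=(3, 3); the 64x64 spatial size is still
    # preserved.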
buf364 = extern_kernels.convolution(buf360, buf363, stride=(1, 1),
padding=(3, 3), dilation=(3, 3), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf364, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf365 = buf364
del buf364
triton_poi_fused_convolution_2[grid(294912)](buf365, primals_244,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_244
buf366 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf367 = reinterpret_tensor(buf366, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf366
buf368 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf367,
primals_246, primals_245, buf368, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf369 = extern_kernels.convolution(buf365, buf368, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf369, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf370 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf356, primals_238, buf369,
primals_247, buf370, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf371 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_249,
buf371, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf372 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_249,
primals_248, buf371, buf372, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf373 = extern_kernels.convolution(buf370, buf372, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf373, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf374 = buf373
del buf373
triton_poi_fused_convolution_2[grid(294912)](buf374, primals_250,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_250
buf375 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf376 = reinterpret_tensor(buf375, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf375
buf377 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf376,
primals_252, primals_251, buf377, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf378 = extern_kernels.convolution(buf374, buf377, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf378, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf379 = buf378
del buf378
triton_poi_fused_convolution_2[grid(294912)](buf379, primals_253,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_253
buf380 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf381 = reinterpret_tensor(buf380, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf380
buf382 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf381,
primals_255, primals_254, buf382, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf383 = extern_kernels.convolution(buf379, buf382, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf383, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf384 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_258,
buf384, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf385 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_258,
primals_257, buf384, buf385, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf386 = extern_kernels.convolution(buf370, buf385, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf386, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf387 = buf386
del buf386
triton_poi_fused_convolution_2[grid(294912)](buf387, primals_259,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_259
buf388 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf389 = reinterpret_tensor(buf388, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf388
buf390 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf389,
primals_261, primals_260, buf390, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf391 = extern_kernels.convolution(buf387, buf390, stride=(1, 1),
padding=(3, 3), dilation=(3, 3), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf391, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf392 = buf391
del buf391
triton_poi_fused_convolution_2[grid(294912)](buf392, primals_262,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_262
buf393 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf394 = reinterpret_tensor(buf393, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf393
buf395 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf394,
primals_264, primals_263, buf395, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf396 = extern_kernels.convolution(buf392, buf395, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf396, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf397 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf383, primals_256, buf396,
primals_265, buf397, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf398 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_267,
buf398, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf399 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_267,
primals_266, buf398, buf399, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf400 = extern_kernels.convolution(buf397, buf399, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf400, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf401 = buf400
del buf400
triton_poi_fused_convolution_2[grid(294912)](buf401, primals_268,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_268
buf402 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf403 = reinterpret_tensor(buf402, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf402
buf404 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf403,
primals_270, primals_269, buf404, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf405 = extern_kernels.convolution(buf401, buf404, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf405, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf406 = buf405
del buf405
triton_poi_fused_convolution_2[grid(294912)](buf406, primals_271,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_271
buf407 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf408 = reinterpret_tensor(buf407, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf407
buf409 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf408,
primals_273, primals_272, buf409, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf410 = extern_kernels.convolution(buf406, buf409, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf410, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf411 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_276,
buf411, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf412 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_276,
primals_275, buf411, buf412, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf413 = extern_kernels.convolution(buf397, buf412, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf413, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf414 = buf413
del buf413
triton_poi_fused_convolution_2[grid(294912)](buf414, primals_277,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_277
buf415 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf416 = reinterpret_tensor(buf415, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf415
buf417 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf416,
primals_279, primals_278, buf417, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf418 = extern_kernels.convolution(buf414, buf417, stride=(1, 1),
padding=(3, 3), dilation=(3, 3), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf418, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf419 = buf418
del buf418
triton_poi_fused_convolution_2[grid(294912)](buf419, primals_280,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_280
buf420 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf421 = reinterpret_tensor(buf420, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf420
buf422 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf421,
primals_282, primals_281, buf422, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf423 = extern_kernels.convolution(buf419, buf422, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf423, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf424 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf410, primals_274, buf423,
primals_283, buf424, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf425 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf426 = reinterpret_tensor(buf425, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf425
buf427 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.
float32)
triton_per_fused__weight_norm_interface_6[grid(18)](buf426,
primals_285, primals_284, buf427, 18, 18, XBLOCK=32, num_warps=
8, num_stages=1)
buf428 = extern_kernels.convolution(buf424, buf427, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf428, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf429 = buf428
del buf428
triton_poi_fused_add_convolution_7[grid(294912)](buf429,
primals_286, buf343, 294912, XBLOCK=1024, num_warps=4, num_stages=1
)
del primals_286
buf430 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_288,
buf430, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf431 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_288,
primals_287, buf430, buf431, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf432 = extern_kernels.convolution(buf429, buf431, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf432, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf433 = buf432
del buf432
triton_poi_fused_convolution_2[grid(294912)](buf433, primals_289,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_289
buf434 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf435 = reinterpret_tensor(buf434, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf434
buf436 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf435,
primals_291, primals_290, buf436, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf437 = extern_kernels.convolution(buf433, buf436, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf437, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf438 = buf437
del buf437
triton_poi_fused_convolution_2[grid(294912)](buf438, primals_292,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_292
buf439 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf440 = reinterpret_tensor(buf439, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf439
buf441 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf440,
primals_294, primals_293, buf441, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf442 = extern_kernels.convolution(buf438, buf441, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf442, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf443 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_297,
buf443, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf444 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_297,
primals_296, buf443, buf444, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf445 = extern_kernels.convolution(buf429, buf444, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf445, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf446 = buf445
del buf445
triton_poi_fused_convolution_2[grid(294912)](buf446, primals_298,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_298
buf447 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf448 = reinterpret_tensor(buf447, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf447
buf449 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf448,
primals_300, primals_299, buf449, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf450 = extern_kernels.convolution(buf446, buf449, stride=(1, 1),
padding=(3, 3), dilation=(3, 3), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf450, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf451 = buf450
del buf450
triton_poi_fused_convolution_2[grid(294912)](buf451, primals_301,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_301
buf452 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf453 = reinterpret_tensor(buf452, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf452
buf454 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf453,
primals_303, primals_302, buf454, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf455 = extern_kernels.convolution(buf451, buf454, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf455, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf456 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf442, primals_295, buf455,
primals_304, buf456, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf457 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_306,
buf457, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf458 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_306,
primals_305, buf457, buf458, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf459 = extern_kernels.convolution(buf456, buf458, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf459, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf460 = buf459
del buf459
triton_poi_fused_convolution_2[grid(294912)](buf460, primals_307,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_307
buf461 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf462 = reinterpret_tensor(buf461, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf461
buf463 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf462,
primals_309, primals_308, buf463, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf464 = extern_kernels.convolution(buf460, buf463, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf464, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf465 = buf464
del buf464
triton_poi_fused_convolution_2[grid(294912)](buf465, primals_310,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_310
buf466 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf467 = reinterpret_tensor(buf466, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf466
buf468 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf467,
primals_312, primals_311, buf468, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf469 = extern_kernels.convolution(buf465, buf468, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf469, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf470 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_315,
buf470, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf471 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_315,
primals_314, buf470, buf471, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf472 = extern_kernels.convolution(buf456, buf471, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf472, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf473 = buf472
del buf472
triton_poi_fused_convolution_2[grid(294912)](buf473, primals_316,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_316
buf474 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf475 = reinterpret_tensor(buf474, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf474
buf476 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf475,
primals_318, primals_317, buf476, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf477 = extern_kernels.convolution(buf473, buf476, stride=(1, 1),
padding=(3, 3), dilation=(3, 3), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf477, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf478 = buf477
del buf477
triton_poi_fused_convolution_2[grid(294912)](buf478, primals_319,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_319
buf479 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf480 = reinterpret_tensor(buf479, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf479
buf481 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf480,
primals_321, primals_320, buf481, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf482 = extern_kernels.convolution(buf478, buf481, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf482, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf483 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf469, primals_313, buf482,
primals_322, buf483, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf484 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_324,
buf484, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf485 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_324,
primals_323, buf484, buf485, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf486 = extern_kernels.convolution(buf483, buf485, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf486, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf487 = buf486
del buf486
triton_poi_fused_convolution_2[grid(294912)](buf487, primals_325,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_325
buf488 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf489 = reinterpret_tensor(buf488, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf488
buf490 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf489,
primals_327, primals_326, buf490, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf491 = extern_kernels.convolution(buf487, buf490, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf491, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf492 = buf491
del buf491
triton_poi_fused_convolution_2[grid(294912)](buf492, primals_328,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_328
buf493 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf494 = reinterpret_tensor(buf493, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf493
buf495 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf494,
primals_330, primals_329, buf495, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf496 = extern_kernels.convolution(buf492, buf495, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf496, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf497 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_333,
buf497, 18, XBLOCK=32, num_warps=1, num_stages=1)
buf498 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_333,
primals_332, buf497, buf498, 108, XBLOCK=128, num_warps=4,
num_stages=1)
buf499 = extern_kernels.convolution(buf483, buf498, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf499, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf500 = buf499
del buf499
triton_poi_fused_convolution_2[grid(294912)](buf500, primals_334,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_334
buf501 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf502 = reinterpret_tensor(buf501, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf501
buf503 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf502,
primals_336, primals_335, buf503, 18, 9, XBLOCK=32, num_warps=4,
num_stages=1)
buf504 = extern_kernels.convolution(buf500, buf503, stride=(1, 1),
padding=(3, 3), dilation=(3, 3), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf504, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf505 = buf504
del buf504
triton_poi_fused_convolution_2[grid(294912)](buf505, primals_337,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_337
buf506 = empty_strided_cuda((9, 1, 1, 1), (1, 9, 9, 9), torch.float32)
buf507 = reinterpret_tensor(buf506, (9, 1, 1, 1), (1, 1, 1, 1), 0)
del buf506
buf508 = empty_strided_cuda((9, 18, 1, 1), (18, 1, 1, 1), torch.float32
)
triton_per_fused__weight_norm_interface_4[grid(9)](buf507,
primals_339, primals_338, buf508, 9, 18, XBLOCK=1, num_warps=2,
num_stages=1)
buf509 = extern_kernels.convolution(buf505, buf508, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf509, (4, 9, 64, 64), (36864, 4096, 64, 1))
buf510 = empty_strided_cuda((4, 18, 64, 64), (73728, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_5[grid(294912)](buf496, primals_331, buf509,
primals_340, buf510, 294912, XBLOCK=512, num_warps=8, num_stages=1)
buf511 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.
float32)
buf512 = reinterpret_tensor(buf511, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf511
buf513 = empty_strided_cuda((18, 18, 1, 1), (18, 1, 1, 1), torch.
float32)
triton_per_fused__weight_norm_interface_6[grid(18)](buf512,
primals_342, primals_341, buf513, 18, 18, XBLOCK=32, num_warps=
8, num_stages=1)
buf514 = extern_kernels.convolution(buf510, buf513, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf514, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf515 = buf514
del buf514
triton_poi_fused_add_convolution_8[grid(294912)](buf515,
primals_343, buf429, primals_1, 294912, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_343
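    # Editor's note (hedged): this fused add_convolution kernel appears to fold the
    # conv bias (primals_343) and two skip tensors -- buf429 and the network input
    # primals_1 -- into buf515 in a single elementwise pass, covering the trailing
    # `out = out + x` residual adds of the module code below.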
buf516 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf509, primals_340, buf516, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf509
del primals_340
buf517 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf496, primals_331, buf517, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf496
del primals_331
buf518 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf482, primals_322, buf518, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf482
del primals_322
buf519 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf469, primals_313, buf519, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf469
del primals_313
buf520 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf455, primals_304, buf520, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf455
del primals_304
buf521 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf442, primals_295, buf521, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf442
del primals_295
buf522 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf423, primals_283, buf522, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf423
del primals_283
buf523 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf410, primals_274, buf523, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf410
del primals_274
buf524 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf396, primals_265, buf524, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf396
del primals_265
buf525 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf383, primals_256, buf525, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf383
del primals_256
buf526 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf369, primals_247, buf526, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf369
del primals_247
buf527 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf356, primals_238, buf527, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf356
del primals_238
buf528 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf337, primals_226, buf528, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf337
del primals_226
buf529 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf324, primals_217, buf529, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf324
del primals_217
buf530 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf310, primals_208, buf530, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf310
del primals_208
buf531 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf297, primals_199, buf531, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf297
del primals_199
buf532 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf283, primals_190, buf532, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf283
del primals_190
buf533 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf270, primals_181, buf533, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf270
del primals_181
buf534 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf251, primals_169, buf534, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf251
del primals_169
buf535 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf238, primals_160, buf535, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf238
del primals_160
buf536 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf224, primals_151, buf536, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf224
del primals_151
buf537 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf211, primals_142, buf537, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf211
del primals_142
buf538 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf197, primals_133, buf538, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf197
del primals_133
buf539 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf184, primals_124, buf539, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf184
del primals_124
buf540 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf165, primals_112, buf540, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf165
del primals_112
buf541 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf152, primals_103, buf541, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf152
del primals_103
buf542 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf138, primals_94, buf542, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf138
del primals_94
buf543 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf125, primals_85, buf543, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf125
del primals_85
buf544 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf111, primals_76, buf544, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf111
del primals_76
buf545 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf98, primals_67, buf545, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf98
del primals_67
buf546 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf79, primals_55, buf546, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf79
del primals_55
buf547 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf66, primals_46, buf547, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf66
del primals_46
buf548 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf52, primals_37, buf548, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf52
del primals_37
buf549 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf39, primals_28, buf549, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf39
del primals_28
buf550 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf25, primals_19, buf550, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf25
del primals_19
buf551 = empty_strided_cuda((4, 9, 64, 64), (36864, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_9[grid(
147456)](buf12, primals_10, buf551, 147456, XBLOCK=1024,
num_warps=4, num_stages=1)
del buf12
del primals_10
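    # Editor's note (hedged): buf516..buf551 are boolean masks marking where each
    # fused conv + LeakyReLU saw a positive pre-activation; Inductor saves these
    # masks (rather than the activations themselves) so the LeakyReLU backward can
    # be reconstructed cheaply.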
return (buf515, buf1, buf6, buf11, buf14, buf19, buf24, buf28, buf33,
buf38, buf41, buf46, buf51, buf55, buf60, buf65, buf68, buf73,
buf78, buf83, buf87, buf92, buf97, buf100, buf105, buf110, buf114,
buf119, buf124, buf127, buf132, buf137, buf141, buf146, buf151,
buf154, buf159, buf164, buf169, buf173, buf178, buf183, buf186,
buf191, buf196, buf200, buf205, buf210, buf213, buf218, buf223,
buf227, buf232, buf237, buf240, buf245, buf250, buf255, buf259,
buf264, buf269, buf272, buf277, buf282, buf286, buf291, buf296,
buf299, buf304, buf309, buf313, buf318, buf323, buf326, buf331,
buf336, buf341, buf345, buf350, buf355, buf358, buf363, buf368,
buf372, buf377, buf382, buf385, buf390, buf395, buf399, buf404,
buf409, buf412, buf417, buf422, buf427, buf431, buf436, buf441,
buf444, buf449, buf454, buf458, buf463, buf468, buf471, buf476,
buf481, buf485, buf490, buf495, buf498, buf503, buf508, buf513,
primals_1, primals_2, primals_3, primals_5, primals_6, primals_8,
primals_9, primals_11, primals_12, primals_14, primals_15,
primals_17, primals_18, primals_20, primals_21, primals_23,
primals_24, primals_26, primals_27, primals_29, primals_30,
primals_32, primals_33, primals_35, primals_36, primals_38,
primals_39, primals_41, primals_42, primals_44, primals_45,
primals_47, primals_48, primals_50, primals_51, primals_53,
primals_54, primals_56, primals_57, primals_59, primals_60,
primals_62, primals_63, primals_65, primals_66, primals_68,
primals_69, primals_71, primals_72, primals_74, primals_75,
primals_77, primals_78, primals_80, primals_81, primals_83,
primals_84, primals_86, primals_87, primals_89, primals_90,
primals_92, primals_93, primals_95, primals_96, primals_98,
primals_99, primals_101, primals_102, primals_104, primals_105,
primals_107, primals_108, primals_110, primals_111, primals_113,
primals_114, primals_116, primals_117, primals_119, primals_120,
primals_122, primals_123, primals_125, primals_126, primals_128,
primals_129, primals_131, primals_132, primals_134, primals_135,
primals_137, primals_138, primals_140, primals_141, primals_143,
primals_144, primals_146, primals_147, primals_149, primals_150,
primals_152, primals_153, primals_155, primals_156, primals_158,
primals_159, primals_161, primals_162, primals_164, primals_165,
primals_167, primals_168, primals_170, primals_171, primals_173,
primals_174, primals_176, primals_177, primals_179, primals_180,
primals_182, primals_183, primals_185, primals_186, primals_188,
primals_189, primals_191, primals_192, primals_194, primals_195,
primals_197, primals_198, primals_200, primals_201, primals_203,
primals_204, primals_206, primals_207, primals_209, primals_210,
primals_212, primals_213, primals_215, primals_216, primals_218,
primals_219, primals_221, primals_222, primals_224, primals_225,
primals_227, primals_228, primals_230, primals_231, primals_233,
primals_234, primals_236, primals_237, primals_239, primals_240,
primals_242, primals_243, primals_245, primals_246, primals_248,
primals_249, primals_251, primals_252, primals_254, primals_255,
primals_257, primals_258, primals_260, primals_261, primals_263,
primals_264, primals_266, primals_267, primals_269, primals_270,
primals_272, primals_273, primals_275, primals_276, primals_278,
primals_279, primals_281, primals_282, primals_284, primals_285,
primals_287, primals_288, primals_290, primals_291, primals_293,
primals_294, primals_296, primals_297, primals_299, primals_300,
primals_302, primals_303, primals_305, primals_306, primals_308,
primals_309, primals_311, primals_312, primals_314, primals_315,
primals_317, primals_318, primals_320, primals_321, primals_323,
primals_324, primals_326, primals_327, primals_329, primals_330,
primals_332, primals_333, primals_335, primals_336, primals_338,
primals_339, primals_341, primals_342, buf0, buf1, buf3, buf5, buf6,
buf8, buf10, buf11, buf13, buf14, buf16, buf18, buf19, buf21, buf23,
buf24, buf26, buf27, buf28, buf30, buf32, buf33, buf35, buf37,
buf38, buf40, buf41, buf43, buf45, buf46, buf48, buf50, buf51,
buf53, buf54, buf55, buf57, buf59, buf60, buf62, buf64, buf65,
buf67, buf68, buf70, buf72, buf73, buf75, buf77, buf78, buf80,
buf82, buf83, buf85, buf86, buf87, buf89, buf91, buf92, buf94,
buf96, buf97, buf99, buf100, buf102, buf104, buf105, buf107, buf109,
buf110, buf112, buf113, buf114, buf116, buf118, buf119, buf121,
buf123, buf124, buf126, buf127, buf129, buf131, buf132, buf134,
buf136, buf137, buf139, buf140, buf141, buf143, buf145, buf146,
buf148, buf150, buf151, buf153, buf154, buf156, buf158, buf159,
buf161, buf163, buf164, buf166, buf168, buf169, buf171, buf172,
buf173, buf175, buf177, buf178, buf180, buf182, buf183, buf185,
buf186, buf188, buf190, buf191, buf193, buf195, buf196, buf198,
buf199, buf200, buf202, buf204, buf205, buf207, buf209, buf210,
buf212, buf213, buf215, buf217, buf218, buf220, buf222, buf223,
buf225, buf226, buf227, buf229, buf231, buf232, buf234, buf236,
buf237, buf239, buf240, buf242, buf244, buf245, buf247, buf249,
buf250, buf252, buf254, buf255, buf257, buf258, buf259, buf261,
buf263, buf264, buf266, buf268, buf269, buf271, buf272, buf274,
buf276, buf277, buf279, buf281, buf282, buf284, buf285, buf286,
buf288, buf290, buf291, buf293, buf295, buf296, buf298, buf299,
buf301, buf303, buf304, buf306, buf308, buf309, buf311, buf312,
buf313, buf315, buf317, buf318, buf320, buf322, buf323, buf325,
buf326, buf328, buf330, buf331, buf333, buf335, buf336, buf338,
buf340, buf341, buf343, buf344, buf345, buf347, buf349, buf350,
buf352, buf354, buf355, buf357, buf358, buf360, buf362, buf363,
buf365, buf367, buf368, buf370, buf371, buf372, buf374, buf376,
buf377, buf379, buf381, buf382, buf384, buf385, buf387, buf389,
buf390, buf392, buf394, buf395, buf397, buf398, buf399, buf401,
buf403, buf404, buf406, buf408, buf409, buf411, buf412, buf414,
buf416, buf417, buf419, buf421, buf422, buf424, buf426, buf427,
buf429, buf430, buf431, buf433, buf435, buf436, buf438, buf440,
buf441, buf443, buf444, buf446, buf448, buf449, buf451, buf453,
buf454, buf456, buf457, buf458, buf460, buf462, buf463, buf465,
buf467, buf468, buf470, buf471, buf473, buf475, buf476, buf478,
buf480, buf481, buf483, buf484, buf485, buf487, buf489, buf490,
buf492, buf494, buf495, buf497, buf498, buf500, buf502, buf503,
buf505, buf507, buf508, buf510, buf512, buf513, buf516, buf517,
buf518, buf519, buf520, buf521, buf522, buf523, buf524, buf525,
buf526, buf527, buf528, buf529, buf530, buf531, buf532, buf533,
buf534, buf535, buf536, buf537, buf538, buf539, buf540, buf541,
buf542, buf543, buf544, buf545, buf546, buf547, buf548, buf549,
buf550, buf551)
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, groups=3):
super(ConvBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=1, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
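# Editor's usage sketch (hedged; not part of the traced graph): ConvBlock
# factorizes a dense 3x3 convolution into a grouped 1x1, a depthwise 3x3 and a
# pointwise 1x1 -- the same grouped/depthwise/pointwise convolution triple issued
# repeatedly in `call` above. Assuming the 18-channel, 64x64 shapes those kernels
# use, a quick shape check would be:
#
#     blk = ConvBlock(in_channels=18, out_channels=9, groups=3)
#     y = blk(torch.randn(4, 18, 64, 64))
#     assert y.shape == (4, 9, 64, 64)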
class ConvBlockD(nn.Module):
def __init__(self, in_channels, out_channels, groups=3, ker_size=2):
super(ConvBlockD, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=ker_size, dilation=ker_size, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
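# Editor's note (hedged): ConvBlockD is ConvBlock with a dilated depthwise stage.
# Because padding == dilation for a 3x3 kernel, spatial size is preserved while
# the effective receptive field grows to 2*ker_size + 1; the padding=(3, 3),
# dilation=(3, 3) depthwise convolutions in `call` above come from ker_size=3
# instances. For example:
#
#     blk = ConvBlockD(18, 9, groups=3, ker_size=3)
#     y = blk(torch.randn(4, 18, 64, 64))
#     assert y.shape == (4, 9, 64, 64)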
class MIRB1(nn.Module):
def __init__(self, args):
super(MIRB1, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlock(args.n_feats, self.c_out)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlock(args.n_feats, self.c_out)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlock(args.n_feats, self.c_out)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
res = x
c1_1 = self.lrelu(self.conv3_1(res))
c2_1 = self.lrelu(self.convd_1(res))
c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1)))
c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1)))
c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1)))
c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1)))
out = self.conv_last(torch.cat([c1_4, c2_4], 1))
out = out + x
return out
class MIRB2(nn.Module):
def __init__(self, args):
super(MIRB2, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=2)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
res = x
c1_1 = self.lrelu(self.conv3_1(res))
c2_1 = self.lrelu(self.convd_1(res))
c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1)))
c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1)))
c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1)))
c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1)))
out = self.conv_last(torch.cat([c1_4, c2_4], 1))
out = out + x
return out
class MIRB3(nn.Module):
def __init__(self, args):
super(MIRB3, self).__init__()
self.c_out = args.n_feats // 2
def wn(x):
return torch.nn.utils.weight_norm(x)
self.conv3_1 = ConvBlock(args.n_feats, self.c_out)
self.convd_1 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv3_2 = ConvBlock(args.n_feats, self.c_out)
self.convd_2 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv3_3 = ConvBlock(args.n_feats, self.c_out)
self.convd_3 = ConvBlockD(args.n_feats, self.c_out, ker_size=3)
self.conv_last = wn(nn.Conv2d(args.n_feats, args.n_feats, 1))
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
res = x
c1_1 = self.lrelu(self.conv3_1(res))
c2_1 = self.lrelu(self.convd_1(res))
c1_2 = self.lrelu(self.conv3_2(torch.cat([c1_1, c2_1], 1)))
c2_2 = self.lrelu(self.convd_2(torch.cat([c1_1, c2_1], 1)))
c1_4 = self.lrelu(self.conv3_3(torch.cat([c1_2, c2_2], 1)))
c2_4 = self.lrelu(self.convd_3(torch.cat([c1_2, c2_2], 1)))
out = self.conv_last(torch.cat([c1_4, c2_4], 1))
out = out + x
return out
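# Editor's note (hedged): MIRB1, MIRB2 and MIRB3 share one two-branch topology --
# each stage concatenates the previous stage's two halves, feeds the result to a
# plain ConvBlock and to a second branch, and the block ends with a 1x1
# weight-normalized conv plus an identity skip. Only the second branch differs:
# ConvBlock in MIRB1, ConvBlockD(ker_size=2) in MIRB2 and ConvBlockD(ker_size=3)
# in MIRB3, giving that branch 3x3, 5x5 and 7x7 effective receptive fields.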
class MMFBNew(nn.Module):
def __init__(self, args):
super(MMFBNew, self).__init__()
def wn(x):
return torch.nn.utils.weight_norm(x)
self.bs1 = MIRB1(args)
self.bs11 = MIRB1(args)
self.bs2 = MIRB2(args)
self.bs22 = MIRB2(args)
self.bs3 = MIRB3(args)
self.bs33 = MIRB3(args)
def forward(self, input_0):
primals_4 = self.bs1.conv3_1.group_conv.bias
primals_2 = self.bs1.conv3_1.group_conv.weight_g
primals_3 = self.bs1.conv3_1.group_conv.weight_v
primals_7 = self.bs1.conv3_1.depth_conv.bias
primals_5 = self.bs1.conv3_1.depth_conv.weight_g
primals_6 = self.bs1.conv3_1.depth_conv.weight_v
primals_10 = self.bs1.conv3_1.point_conv.bias
primals_8 = self.bs1.conv3_1.point_conv.weight_g
primals_9 = self.bs1.conv3_1.point_conv.weight_v
primals_13 = self.bs1.convd_1.group_conv.bias
primals_11 = self.bs1.convd_1.group_conv.weight_g
primals_12 = self.bs1.convd_1.group_conv.weight_v
primals_16 = self.bs1.convd_1.depth_conv.bias
primals_14 = self.bs1.convd_1.depth_conv.weight_g
primals_15 = self.bs1.convd_1.depth_conv.weight_v
primals_19 = self.bs1.convd_1.point_conv.bias
primals_17 = self.bs1.convd_1.point_conv.weight_g
primals_18 = self.bs1.convd_1.point_conv.weight_v
primals_22 = self.bs1.conv3_2.group_conv.bias
primals_20 = self.bs1.conv3_2.group_conv.weight_g
primals_21 = self.bs1.conv3_2.group_conv.weight_v
primals_25 = self.bs1.conv3_2.depth_conv.bias
primals_23 = self.bs1.conv3_2.depth_conv.weight_g
primals_24 = self.bs1.conv3_2.depth_conv.weight_v
primals_28 = self.bs1.conv3_2.point_conv.bias
primals_26 = self.bs1.conv3_2.point_conv.weight_g
primals_27 = self.bs1.conv3_2.point_conv.weight_v
primals_31 = self.bs1.convd_2.group_conv.bias
primals_29 = self.bs1.convd_2.group_conv.weight_g
primals_30 = self.bs1.convd_2.group_conv.weight_v
primals_34 = self.bs1.convd_2.depth_conv.bias
primals_32 = self.bs1.convd_2.depth_conv.weight_g
primals_33 = self.bs1.convd_2.depth_conv.weight_v
primals_37 = self.bs1.convd_2.point_conv.bias
primals_35 = self.bs1.convd_2.point_conv.weight_g
primals_36 = self.bs1.convd_2.point_conv.weight_v
primals_40 = self.bs1.conv3_3.group_conv.bias
primals_38 = self.bs1.conv3_3.group_conv.weight_g
primals_39 = self.bs1.conv3_3.group_conv.weight_v
primals_43 = self.bs1.conv3_3.depth_conv.bias
primals_41 = self.bs1.conv3_3.depth_conv.weight_g
primals_42 = self.bs1.conv3_3.depth_conv.weight_v
primals_46 = self.bs1.conv3_3.point_conv.bias
primals_44 = self.bs1.conv3_3.point_conv.weight_g
primals_45 = self.bs1.conv3_3.point_conv.weight_v
primals_49 = self.bs1.convd_3.group_conv.bias
primals_47 = self.bs1.convd_3.group_conv.weight_g
primals_48 = self.bs1.convd_3.group_conv.weight_v
primals_52 = self.bs1.convd_3.depth_conv.bias
primals_50 = self.bs1.convd_3.depth_conv.weight_g
primals_51 = self.bs1.convd_3.depth_conv.weight_v
primals_55 = self.bs1.convd_3.point_conv.bias
primals_53 = self.bs1.convd_3.point_conv.weight_g
primals_54 = self.bs1.convd_3.point_conv.weight_v
primals_58 = self.bs1.conv_last.bias
primals_56 = self.bs1.conv_last.weight_g
primals_57 = self.bs1.conv_last.weight_v
primals_61 = self.bs11.conv3_1.group_conv.bias
primals_59 = self.bs11.conv3_1.group_conv.weight_g
primals_60 = self.bs11.conv3_1.group_conv.weight_v
primals_64 = self.bs11.conv3_1.depth_conv.bias
primals_62 = self.bs11.conv3_1.depth_conv.weight_g
primals_63 = self.bs11.conv3_1.depth_conv.weight_v
primals_67 = self.bs11.conv3_1.point_conv.bias
primals_65 = self.bs11.conv3_1.point_conv.weight_g
primals_66 = self.bs11.conv3_1.point_conv.weight_v
primals_70 = self.bs11.convd_1.group_conv.bias
primals_68 = self.bs11.convd_1.group_conv.weight_g
primals_69 = self.bs11.convd_1.group_conv.weight_v
primals_73 = self.bs11.convd_1.depth_conv.bias
primals_71 = self.bs11.convd_1.depth_conv.weight_g
primals_72 = self.bs11.convd_1.depth_conv.weight_v
primals_76 = self.bs11.convd_1.point_conv.bias
primals_74 = self.bs11.convd_1.point_conv.weight_g
primals_75 = self.bs11.convd_1.point_conv.weight_v
primals_79 = self.bs11.conv3_2.group_conv.bias
primals_77 = self.bs11.conv3_2.group_conv.weight_g
primals_78 = self.bs11.conv3_2.group_conv.weight_v
primals_82 = self.bs11.conv3_2.depth_conv.bias
primals_80 = self.bs11.conv3_2.depth_conv.weight_g
primals_81 = self.bs11.conv3_2.depth_conv.weight_v
primals_85 = self.bs11.conv3_2.point_conv.bias
primals_83 = self.bs11.conv3_2.point_conv.weight_g
primals_84 = self.bs11.conv3_2.point_conv.weight_v
primals_88 = self.bs11.convd_2.group_conv.bias
primals_86 = self.bs11.convd_2.group_conv.weight_g
primals_87 = self.bs11.convd_2.group_conv.weight_v
primals_91 = self.bs11.convd_2.depth_conv.bias
primals_89 = self.bs11.convd_2.depth_conv.weight_g
primals_90 = self.bs11.convd_2.depth_conv.weight_v
primals_94 = self.bs11.convd_2.point_conv.bias
primals_92 = self.bs11.convd_2.point_conv.weight_g
primals_93 = self.bs11.convd_2.point_conv.weight_v
primals_97 = self.bs11.conv3_3.group_conv.bias
primals_95 = self.bs11.conv3_3.group_conv.weight_g
primals_96 = self.bs11.conv3_3.group_conv.weight_v
primals_100 = self.bs11.conv3_3.depth_conv.bias
primals_98 = self.bs11.conv3_3.depth_conv.weight_g
primals_99 = self.bs11.conv3_3.depth_conv.weight_v
primals_103 = self.bs11.conv3_3.point_conv.bias
primals_101 = self.bs11.conv3_3.point_conv.weight_g
primals_102 = self.bs11.conv3_3.point_conv.weight_v
primals_106 = self.bs11.convd_3.group_conv.bias
primals_104 = self.bs11.convd_3.group_conv.weight_g
primals_105 = self.bs11.convd_3.group_conv.weight_v
primals_109 = self.bs11.convd_3.depth_conv.bias
primals_107 = self.bs11.convd_3.depth_conv.weight_g
primals_108 = self.bs11.convd_3.depth_conv.weight_v
primals_112 = self.bs11.convd_3.point_conv.bias
primals_110 = self.bs11.convd_3.point_conv.weight_g
primals_111 = self.bs11.convd_3.point_conv.weight_v
primals_115 = self.bs11.conv_last.bias
primals_113 = self.bs11.conv_last.weight_g
primals_114 = self.bs11.conv_last.weight_v
primals_118 = self.bs2.conv3_1.group_conv.bias
primals_116 = self.bs2.conv3_1.group_conv.weight_g
primals_117 = self.bs2.conv3_1.group_conv.weight_v
primals_121 = self.bs2.conv3_1.depth_conv.bias
primals_119 = self.bs2.conv3_1.depth_conv.weight_g
primals_120 = self.bs2.conv3_1.depth_conv.weight_v
primals_124 = self.bs2.conv3_1.point_conv.bias
primals_122 = self.bs2.conv3_1.point_conv.weight_g
primals_123 = self.bs2.conv3_1.point_conv.weight_v
primals_127 = self.bs2.convd_1.group_conv.bias
primals_125 = self.bs2.convd_1.group_conv.weight_g
primals_126 = self.bs2.convd_1.group_conv.weight_v
primals_130 = self.bs2.convd_1.depth_conv.bias
primals_128 = self.bs2.convd_1.depth_conv.weight_g
primals_129 = self.bs2.convd_1.depth_conv.weight_v
primals_133 = self.bs2.convd_1.point_conv.bias
primals_131 = self.bs2.convd_1.point_conv.weight_g
primals_132 = self.bs2.convd_1.point_conv.weight_v
primals_136 = self.bs2.conv3_2.group_conv.bias
primals_134 = self.bs2.conv3_2.group_conv.weight_g
primals_135 = self.bs2.conv3_2.group_conv.weight_v
primals_139 = self.bs2.conv3_2.depth_conv.bias
primals_137 = self.bs2.conv3_2.depth_conv.weight_g
primals_138 = self.bs2.conv3_2.depth_conv.weight_v
primals_142 = self.bs2.conv3_2.point_conv.bias
primals_140 = self.bs2.conv3_2.point_conv.weight_g
primals_141 = self.bs2.conv3_2.point_conv.weight_v
primals_145 = self.bs2.convd_2.group_conv.bias
primals_143 = self.bs2.convd_2.group_conv.weight_g
primals_144 = self.bs2.convd_2.group_conv.weight_v
primals_148 = self.bs2.convd_2.depth_conv.bias
primals_146 = self.bs2.convd_2.depth_conv.weight_g
primals_147 = self.bs2.convd_2.depth_conv.weight_v
primals_151 = self.bs2.convd_2.point_conv.bias
primals_149 = self.bs2.convd_2.point_conv.weight_g
primals_150 = self.bs2.convd_2.point_conv.weight_v
primals_154 = self.bs2.conv3_3.group_conv.bias
primals_152 = self.bs2.conv3_3.group_conv.weight_g
primals_153 = self.bs2.conv3_3.group_conv.weight_v
primals_157 = self.bs2.conv3_3.depth_conv.bias
primals_155 = self.bs2.conv3_3.depth_conv.weight_g
primals_156 = self.bs2.conv3_3.depth_conv.weight_v
primals_160 = self.bs2.conv3_3.point_conv.bias
primals_158 = self.bs2.conv3_3.point_conv.weight_g
primals_159 = self.bs2.conv3_3.point_conv.weight_v
primals_163 = self.bs2.convd_3.group_conv.bias
primals_161 = self.bs2.convd_3.group_conv.weight_g
primals_162 = self.bs2.convd_3.group_conv.weight_v
primals_166 = self.bs2.convd_3.depth_conv.bias
primals_164 = self.bs2.convd_3.depth_conv.weight_g
primals_165 = self.bs2.convd_3.depth_conv.weight_v
primals_169 = self.bs2.convd_3.point_conv.bias
primals_167 = self.bs2.convd_3.point_conv.weight_g
primals_168 = self.bs2.convd_3.point_conv.weight_v
primals_172 = self.bs2.conv_last.bias
primals_170 = self.bs2.conv_last.weight_g
primals_171 = self.bs2.conv_last.weight_v
primals_175 = self.bs22.conv3_1.group_conv.bias
primals_173 = self.bs22.conv3_1.group_conv.weight_g
primals_174 = self.bs22.conv3_1.group_conv.weight_v
primals_178 = self.bs22.conv3_1.depth_conv.bias
primals_176 = self.bs22.conv3_1.depth_conv.weight_g
primals_177 = self.bs22.conv3_1.depth_conv.weight_v
primals_181 = self.bs22.conv3_1.point_conv.bias
primals_179 = self.bs22.conv3_1.point_conv.weight_g
primals_180 = self.bs22.conv3_1.point_conv.weight_v
primals_184 = self.bs22.convd_1.group_conv.bias
primals_182 = self.bs22.convd_1.group_conv.weight_g
primals_183 = self.bs22.convd_1.group_conv.weight_v
primals_187 = self.bs22.convd_1.depth_conv.bias
primals_185 = self.bs22.convd_1.depth_conv.weight_g
primals_186 = self.bs22.convd_1.depth_conv.weight_v
primals_190 = self.bs22.convd_1.point_conv.bias
primals_188 = self.bs22.convd_1.point_conv.weight_g
primals_189 = self.bs22.convd_1.point_conv.weight_v
primals_193 = self.bs22.conv3_2.group_conv.bias
primals_191 = self.bs22.conv3_2.group_conv.weight_g
primals_192 = self.bs22.conv3_2.group_conv.weight_v
primals_196 = self.bs22.conv3_2.depth_conv.bias
primals_194 = self.bs22.conv3_2.depth_conv.weight_g
primals_195 = self.bs22.conv3_2.depth_conv.weight_v
primals_199 = self.bs22.conv3_2.point_conv.bias
primals_197 = self.bs22.conv3_2.point_conv.weight_g
primals_198 = self.bs22.conv3_2.point_conv.weight_v
primals_202 = self.bs22.convd_2.group_conv.bias
primals_200 = self.bs22.convd_2.group_conv.weight_g
primals_201 = self.bs22.convd_2.group_conv.weight_v
primals_205 = self.bs22.convd_2.depth_conv.bias
primals_203 = self.bs22.convd_2.depth_conv.weight_g
primals_204 = self.bs22.convd_2.depth_conv.weight_v
primals_208 = self.bs22.convd_2.point_conv.bias
primals_206 = self.bs22.convd_2.point_conv.weight_g
primals_207 = self.bs22.convd_2.point_conv.weight_v
primals_211 = self.bs22.conv3_3.group_conv.bias
primals_209 = self.bs22.conv3_3.group_conv.weight_g
primals_210 = self.bs22.conv3_3.group_conv.weight_v
primals_214 = self.bs22.conv3_3.depth_conv.bias
primals_212 = self.bs22.conv3_3.depth_conv.weight_g
primals_213 = self.bs22.conv3_3.depth_conv.weight_v
primals_217 = self.bs22.conv3_3.point_conv.bias
primals_215 = self.bs22.conv3_3.point_conv.weight_g
primals_216 = self.bs22.conv3_3.point_conv.weight_v
primals_220 = self.bs22.convd_3.group_conv.bias
primals_218 = self.bs22.convd_3.group_conv.weight_g
primals_219 = self.bs22.convd_3.group_conv.weight_v
primals_223 = self.bs22.convd_3.depth_conv.bias
primals_221 = self.bs22.convd_3.depth_conv.weight_g
primals_222 = self.bs22.convd_3.depth_conv.weight_v
primals_226 = self.bs22.convd_3.point_conv.bias
primals_224 = self.bs22.convd_3.point_conv.weight_g
primals_225 = self.bs22.convd_3.point_conv.weight_v
primals_229 = self.bs22.conv_last.bias
primals_227 = self.bs22.conv_last.weight_g
primals_228 = self.bs22.conv_last.weight_v
primals_232 = self.bs3.conv3_1.group_conv.bias
primals_230 = self.bs3.conv3_1.group_conv.weight_g
primals_231 = self.bs3.conv3_1.group_conv.weight_v
primals_235 = self.bs3.conv3_1.depth_conv.bias
primals_233 = self.bs3.conv3_1.depth_conv.weight_g
primals_234 = self.bs3.conv3_1.depth_conv.weight_v
primals_238 = self.bs3.conv3_1.point_conv.bias
primals_236 = self.bs3.conv3_1.point_conv.weight_g
primals_237 = self.bs3.conv3_1.point_conv.weight_v
primals_241 = self.bs3.convd_1.group_conv.bias
primals_239 = self.bs3.convd_1.group_conv.weight_g
primals_240 = self.bs3.convd_1.group_conv.weight_v
primals_244 = self.bs3.convd_1.depth_conv.bias
primals_242 = self.bs3.convd_1.depth_conv.weight_g
primals_243 = self.bs3.convd_1.depth_conv.weight_v
primals_247 = self.bs3.convd_1.point_conv.bias
primals_245 = self.bs3.convd_1.point_conv.weight_g
primals_246 = self.bs3.convd_1.point_conv.weight_v
primals_250 = self.bs3.conv3_2.group_conv.bias
primals_248 = self.bs3.conv3_2.group_conv.weight_g
primals_249 = self.bs3.conv3_2.group_conv.weight_v
primals_253 = self.bs3.conv3_2.depth_conv.bias
primals_251 = self.bs3.conv3_2.depth_conv.weight_g
primals_252 = self.bs3.conv3_2.depth_conv.weight_v
primals_256 = self.bs3.conv3_2.point_conv.bias
primals_254 = self.bs3.conv3_2.point_conv.weight_g
primals_255 = self.bs3.conv3_2.point_conv.weight_v
primals_259 = self.bs3.convd_2.group_conv.bias
primals_257 = self.bs3.convd_2.group_conv.weight_g
primals_258 = self.bs3.convd_2.group_conv.weight_v
primals_262 = self.bs3.convd_2.depth_conv.bias
primals_260 = self.bs3.convd_2.depth_conv.weight_g
primals_261 = self.bs3.convd_2.depth_conv.weight_v
primals_265 = self.bs3.convd_2.point_conv.bias
primals_263 = self.bs3.convd_2.point_conv.weight_g
primals_264 = self.bs3.convd_2.point_conv.weight_v
primals_268 = self.bs3.conv3_3.group_conv.bias
primals_266 = self.bs3.conv3_3.group_conv.weight_g
primals_267 = self.bs3.conv3_3.group_conv.weight_v
primals_271 = self.bs3.conv3_3.depth_conv.bias
primals_269 = self.bs3.conv3_3.depth_conv.weight_g
primals_270 = self.bs3.conv3_3.depth_conv.weight_v
primals_274 = self.bs3.conv3_3.point_conv.bias
primals_272 = self.bs3.conv3_3.point_conv.weight_g
primals_273 = self.bs3.conv3_3.point_conv.weight_v
primals_277 = self.bs3.convd_3.group_conv.bias
primals_275 = self.bs3.convd_3.group_conv.weight_g
primals_276 = self.bs3.convd_3.group_conv.weight_v
primals_280 = self.bs3.convd_3.depth_conv.bias
primals_278 = self.bs3.convd_3.depth_conv.weight_g
primals_279 = self.bs3.convd_3.depth_conv.weight_v
primals_283 = self.bs3.convd_3.point_conv.bias
primals_281 = self.bs3.convd_3.point_conv.weight_g
primals_282 = self.bs3.convd_3.point_conv.weight_v
primals_286 = self.bs3.conv_last.bias
primals_284 = self.bs3.conv_last.weight_g
primals_285 = self.bs3.conv_last.weight_v
primals_289 = self.bs33.conv3_1.group_conv.bias
primals_287 = self.bs33.conv3_1.group_conv.weight_g
primals_288 = self.bs33.conv3_1.group_conv.weight_v
primals_292 = self.bs33.conv3_1.depth_conv.bias
primals_290 = self.bs33.conv3_1.depth_conv.weight_g
primals_291 = self.bs33.conv3_1.depth_conv.weight_v
primals_295 = self.bs33.conv3_1.point_conv.bias
primals_293 = self.bs33.conv3_1.point_conv.weight_g
primals_294 = self.bs33.conv3_1.point_conv.weight_v
primals_298 = self.bs33.convd_1.group_conv.bias
primals_296 = self.bs33.convd_1.group_conv.weight_g
primals_297 = self.bs33.convd_1.group_conv.weight_v
primals_301 = self.bs33.convd_1.depth_conv.bias
primals_299 = self.bs33.convd_1.depth_conv.weight_g
primals_300 = self.bs33.convd_1.depth_conv.weight_v
primals_304 = self.bs33.convd_1.point_conv.bias
primals_302 = self.bs33.convd_1.point_conv.weight_g
primals_303 = self.bs33.convd_1.point_conv.weight_v
primals_307 = self.bs33.conv3_2.group_conv.bias
primals_305 = self.bs33.conv3_2.group_conv.weight_g
primals_306 = self.bs33.conv3_2.group_conv.weight_v
primals_310 = self.bs33.conv3_2.depth_conv.bias
primals_308 = self.bs33.conv3_2.depth_conv.weight_g
primals_309 = self.bs33.conv3_2.depth_conv.weight_v
primals_313 = self.bs33.conv3_2.point_conv.bias
primals_311 = self.bs33.conv3_2.point_conv.weight_g
primals_312 = self.bs33.conv3_2.point_conv.weight_v
primals_316 = self.bs33.convd_2.group_conv.bias
primals_314 = self.bs33.convd_2.group_conv.weight_g
primals_315 = self.bs33.convd_2.group_conv.weight_v
primals_319 = self.bs33.convd_2.depth_conv.bias
primals_317 = self.bs33.convd_2.depth_conv.weight_g
primals_318 = self.bs33.convd_2.depth_conv.weight_v
primals_322 = self.bs33.convd_2.point_conv.bias
primals_320 = self.bs33.convd_2.point_conv.weight_g
primals_321 = self.bs33.convd_2.point_conv.weight_v
primals_325 = self.bs33.conv3_3.group_conv.bias
primals_323 = self.bs33.conv3_3.group_conv.weight_g
primals_324 = self.bs33.conv3_3.group_conv.weight_v
primals_328 = self.bs33.conv3_3.depth_conv.bias
primals_326 = self.bs33.conv3_3.depth_conv.weight_g
primals_327 = self.bs33.conv3_3.depth_conv.weight_v
primals_331 = self.bs33.conv3_3.point_conv.bias
primals_329 = self.bs33.conv3_3.point_conv.weight_g
primals_330 = self.bs33.conv3_3.point_conv.weight_v
primals_334 = self.bs33.convd_3.group_conv.bias
primals_332 = self.bs33.convd_3.group_conv.weight_g
primals_333 = self.bs33.convd_3.group_conv.weight_v
primals_337 = self.bs33.convd_3.depth_conv.bias
primals_335 = self.bs33.convd_3.depth_conv.weight_g
primals_336 = self.bs33.convd_3.depth_conv.weight_v
primals_340 = self.bs33.convd_3.point_conv.bias
primals_338 = self.bs33.convd_3.point_conv.weight_g
primals_339 = self.bs33.convd_3.point_conv.weight_v
primals_343 = self.bs33.conv_last.bias
primals_341 = self.bs33.conv_last.weight_g
primals_342 = self.bs33.conv_last.weight_v
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121, primals_122, primals_123, primals_124,
primals_125, primals_126, primals_127, primals_128, primals_129,
primals_130, primals_131, primals_132, primals_133, primals_134,
primals_135, primals_136, primals_137, primals_138, primals_139,
primals_140, primals_141, primals_142, primals_143, primals_144,
primals_145, primals_146, primals_147, primals_148, primals_149,
primals_150, primals_151, primals_152, primals_153, primals_154,
primals_155, primals_156, primals_157, primals_158, primals_159,
primals_160, primals_161, primals_162, primals_163, primals_164,
primals_165, primals_166, primals_167, primals_168, primals_169,
primals_170, primals_171, primals_172, primals_173, primals_174,
primals_175, primals_176, primals_177, primals_178, primals_179,
primals_180, primals_181, primals_182, primals_183, primals_184,
primals_185, primals_186, primals_187, primals_188, primals_189,
primals_190, primals_191, primals_192, primals_193, primals_194,
primals_195, primals_196, primals_197, primals_198, primals_199,
primals_200, primals_201, primals_202, primals_203, primals_204,
primals_205, primals_206, primals_207, primals_208, primals_209,
primals_210, primals_211, primals_212, primals_213, primals_214,
primals_215, primals_216, primals_217, primals_218, primals_219,
primals_220, primals_221, primals_222, primals_223, primals_224,
primals_225, primals_226, primals_227, primals_228, primals_229,
primals_230, primals_231, primals_232, primals_233, primals_234,
primals_235, primals_236, primals_237, primals_238, primals_239,
primals_240, primals_241, primals_242, primals_243, primals_244,
primals_245, primals_246, primals_247, primals_248, primals_249,
primals_250, primals_251, primals_252, primals_253, primals_254,
primals_255, primals_256, primals_257, primals_258, primals_259,
primals_260, primals_261, primals_262, primals_263, primals_264,
primals_265, primals_266, primals_267, primals_268, primals_269,
primals_270, primals_271, primals_272, primals_273, primals_274,
primals_275, primals_276, primals_277, primals_278, primals_279,
primals_280, primals_281, primals_282, primals_283, primals_284,
primals_285, primals_286, primals_287, primals_288, primals_289,
primals_290, primals_291, primals_292, primals_293, primals_294,
primals_295, primals_296, primals_297, primals_298, primals_299,
primals_300, primals_301, primals_302, primals_303, primals_304,
primals_305, primals_306, primals_307, primals_308, primals_309,
primals_310, primals_311, primals_312, primals_313, primals_314,
primals_315, primals_316, primals_317, primals_318, primals_319,
primals_320, primals_321, primals_322, primals_323, primals_324,
primals_325, primals_326, primals_327, primals_328, primals_329,
primals_330, primals_331, primals_332, primals_333, primals_334,
primals_335, primals_336, primals_337, primals_338, primals_339,
primals_340, primals_341, primals_342, primals_343])
return output[0]
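# Editor's usage sketch (hedged; SimpleNamespace stands in for the repo's real
# argument object, which is an assumption): the fused `call` above was traced for
# n_feats=18 and CUDA inputs of shape (4, 18, 64, 64), so a minimal smoke test of
# the compiled module would be:
#
#     from types import SimpleNamespace
#     net = MMFBNew(SimpleNamespace(n_feats=18)).cuda()
#     out = net(torch.randn(4, 18, 64, 64, device="cuda"))
#     assert out.shape == (4, 18, 64, 64)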
| wwjfsfs/wwjyyds | MMFB | false | 13,231 | ["MIT"] | 0 | 80cd6267fde7cd98838078a0d5178a557ceb7414 | https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414 |
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/3r/c3rslv2exiiurkywxt4oxifqli4zlmoxh26mfj7b2xmxbacdj7n6.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 96
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ux/cuxxnuzgdmw5u74a5nvy6j4jxjaqgc7l5d5itaeimkkpqtdiearw.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16384], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 14641
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (14641*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (43923*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/wv/cwvtp6qflpb42kxrujmda5zselv7wvkz3fgp2tryo2ftsisaildr.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ih/cihu7ohoiwwrblocurozhw6ihpzbq4oc43mseo4n6wd7ronp74tw.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/je/cjerwb7hfrzof7w4hhg26a2sn3nwrdv2vamyic4znls7dpwihtsy.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1874048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
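# This epilogue (and its siblings triton_poi_fused_convolution_relu_7/9/11 below)
# fuses the conv bias add with ReLU; the channel index is `xindex % 32` because
# the activation is channels-last, so the per-channel bias broadcasts elementwise.
# A hedged eager-mode reference (`_ref_bias_relu` is an illustrative name):
def _ref_bias_relu(conv_out, bias):
    # conv_out: (N, C, H, W) result of the bias-free convolution; bias: (C,)
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))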
# kernel path: runs/run_shard_9/inductor_cache/wn/cwnu4gzv6mwa5pmfjc2o5gc7vpuyoyee4t773myheko4t75tl25y.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 460800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32) % 60
x2 = (xindex // 1920) % 60
x3 = (xindex // 115200)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (7744*x2) + (468512*x3)), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (7744*x2) + (468512*x3)), None)
tmp3 = tl.load(in_ptr0 + (3872 + x0 + (64*x1) + (7744*x2) + (468512*x3)), None)
tmp5 = tl.load(in_ptr0 + (3904 + x0 + (64*x1) + (7744*x2) + (468512*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4), tmp6, None)
tl.store(out_ptr1 + (x4), tmp16, None)
''', device_str='cuda')
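# This pooling kernel (and 8/10/12 below) takes a 2x2, stride-2 max over the
# channels-last activation and stores an int8 code for the winning window slot
# (0=top-left, 1=top-right, 2=bottom-left, 3=bottom-right; the strict `>`
# comparisons mean ties keep the earliest slot). Eager max_pool2d returns flat
# spatial indices rather than this window-local code, so the sketch below is
# analogous, not identical (`_ref_maxpool` is an illustrative name):
def _ref_maxpool(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)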
# kernel path: runs/run_shard_9/inductor_cache/uw/cuw2ljmhrlp7zo7yldtfwuwerbrbokld5ppna7eumivequ5pt7qb.py
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 921600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/rr/crrstp2dyt2dryqsfd3dmo7ic77jj35vt43aaoq46e2nj4j43hur.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 30
x2 = (xindex // 1920)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (7680*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (7680*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (3840 + x0 + (128*x1) + (7680*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (3904 + x0 + (128*x1) + (7680*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/3l/c3lgrob2i2ovmpph5uzsa7fiqtbtb7ov5wlo6t7pk6nz4vxfyj46.py
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_4 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 460800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ov/covh4h5pbfthppfx7dl4aqot7xjrfuqhzavjgr5gcjaprird4dts.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 128
x1 = (xindex // 128) % 15
x2 = (xindex // 1920)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (7680*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (7680*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (3840 + x0 + (256*x1) + (7680*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (3968 + x0 + (256*x1) + (7680*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/fv/cfvch2bbkxgfrzqp4bzysnrlzrqcvj7ehfy3mhbccf5xp3g5vhzv.py
# Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_6 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4c/c4cp6bsrhplz3skp3wwcnhvvfuwgjhvzfcxkrxkikjcwz7jyxave.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_7 => _low_memory_max_pool2d_with_offsets_3, getitem_7
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 196
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 7
y1 = (yindex // 7) % 7
y2 = (yindex // 49)
y4 = yindex
y5 = yindex % 49
tmp0 = tl.load(in_ptr0 + (x3 + (512*y0) + (7680*y1) + (57600*y2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x3 + (512*y0) + (7680*y1) + (57600*y2)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (3840 + x3 + (512*y0) + (7680*y1) + (57600*y2)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (4096 + x3 + (512*y0) + (7680*y1) + (57600*y2)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + (256*y4)), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y5 + (49*x3) + (12544*y2)), tmp16, xmask & ymask)
''', device_str='cuda')
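# Besides pooling, this last pooling kernel also restores a contiguous NCHW
# layout for its values output (strides (12544, 49, 7, 1) on buf20), since the
# downstream view/matmul expects a plain row-major (1, 50176) buffer.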
# kernel path: runs/run_shard_9/inductor_cache/tl/ctlvp2mxvtowx2y2qsp6tht4xp3j2cjh2ewx2dwvz4qnw64uabvi.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_10 => relu_4
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_13 = async_compile.triton('triton_poi_fused_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0), tmp4, None)
''', device_str='cuda')
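# Bias-add + ReLU epilogue for fc9; the matmul itself runs through
# extern_kernels.mm in call() below. Hedged eager-mode equivalent
# (`_ref_linear_relu` is an illustrative name):
def _ref_linear_relu(x, weight, bias):
    return torch.relu(x @ weight.t() + bias)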
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 121, 121), (43923, 14641, 121, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (4096, 50176), (50176, 1))
assert_size_stride(primals_11, (4096, ), (1, ))
assert_size_stride(primals_12, (133, 4096), (4096, 1))
assert_size_stride(primals_13, (133, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 96, 9, grid=grid(96, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 121, 121), (43923, 1, 363, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 14641, grid=grid(12, 14641), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 2048, 9, grid=grid(2048, 9), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 121, 121), (468512, 1, 3872, 32))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf6, primals_2, 1874048, grid=grid(1874048), stream=stream0)
del primals_2
buf7 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32), torch.float32)
buf8 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_6.run(buf6, buf7, buf8, 460800, grid=grid(460800), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 60, 60), (230400, 1, 3840, 64))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf10, primals_5, 921600, grid=grid(921600), stream=stream0)
del primals_5
buf11 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64), torch.float32)
buf12 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf10, buf11, buf12, 230400, grid=grid(230400), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 30, 30), (115200, 1, 3840, 128))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf14, primals_7, 460800, grid=grid(460800), stream=stream0)
del primals_7
buf15 = empty_strided_cuda((4, 128, 15, 15), (28800, 1, 1920, 128), torch.float32)
buf16 = empty_strided_cuda((4, 128, 15, 15), (28800, 1, 1920, 128), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf14, buf15, buf16, 115200, grid=grid(115200), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf18, primals_9, 230400, grid=grid(230400), stream=stream0)
del primals_9
buf19 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256), torch.int8)
buf20 = empty_strided_cuda((4, 256, 7, 7), (12544, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf18, buf19, buf20, 196, 256, grid=grid(196, 256), stream=stream0)
buf21 = empty_strided_cuda((1, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf20, (1, 50176), (0, 1), 0), reinterpret_tensor(primals_10, (50176, 4096), (1, 50176), 0), out=buf21)
buf22 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu]
triton_poi_fused_relu_13.run(buf22, primals_11, 4096, grid=grid(4096), stream=stream0)
del primals_11
buf23 = empty_strided_cuda((1, 133), (133, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, buf22, reinterpret_tensor(primals_12, (4096, 133), (1, 4096), 0), alpha=1, beta=1, out=buf23)
del primals_13
return (buf23, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10, buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor(buf20, (1, 50176), (50176, 1), 0), buf22, primals_12, primals_10, )
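# Besides buf23 (the logits), the tuple above carries the saved activations and
# reinterpreted weight tensors, which appears to follow the usual inductor
# convention of returning the tensors a compiled backward pass would read after
# the graph outputs.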
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 121, 121), (43923, 14641, 121, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4096, 50176), (50176, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((133, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((133, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
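# Running this file directly benchmarks call() on random strided inputs through
# print_performance; `times` and `repeat` control the timing loop.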
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
import torch.optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=
3, padding=1)
self.max2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size
=3, padding=1)
self.max4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv5 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, padding=1)
self.max6 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv7 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, padding=1)
self.max8 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc9 = nn.Linear(256 * 14 * 14, 4096)
self.fc10 = nn.Linear(4096, 133)
self.dropout = nn.Dropout(0.25)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.max2(x)
x = F.relu(self.conv3(x))
x = self.max4(x)
x = F.relu(self.conv5(x))
x = self.max6(x)
x = F.relu(self.conv7(x))
x = self.max8(x)
x = x.view(-1, 256 * 14 * 14)
x = self.dropout(x)
x = F.relu(self.fc9(x))
x = self.dropout(x)
x = self.fc10(x)
return x
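# Shape trace for the 4x3x121x121 inputs from get_inputs() below: each conv
# keeps the spatial size (padding=1) and each pool halves it,
# 121 -> 60 -> 30 -> 15 -> 7, so one sample yields 256*7*7 = 12544 features.
# The view(-1, 256*14*14) call therefore folds the whole batch into a single
# row (4 * 12544 = 50176 = 256*14*14), which is why the compiled graph above
# produces one (1, 133) output row for the four-image batch.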
def get_inputs():
return [torch.rand([4, 3, 121, 121])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
xnumel = 14641
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 14641 * y3), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 43923 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1874048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32 % 60
x2 = xindex // 1920 % 60
x3 = xindex // 115200
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 7744 * x2 + 468512 * x3), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 7744 * x2 + 468512 * x3),
None)
tmp3 = tl.load(in_ptr0 + (3872 + x0 + 64 * x1 + 7744 * x2 + 468512 * x3
), None)
tmp5 = tl.load(in_ptr0 + (3904 + x0 + 64 * x1 + 7744 * x2 + 468512 * x3
), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x4, tmp6, None)
tl.store(out_ptr1 + x4, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 30
x2 = xindex // 1920
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 7680 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 7680 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (3840 + x0 + 128 * x1 + 7680 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (3904 + x0 + 128 * x1 + 7680 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 128
x1 = xindex // 128 % 15
x2 = xindex // 1920
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 7680 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 7680 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (3840 + x0 + 256 * x1 + 7680 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (3968 + x0 + 256 * x1 + 7680 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 196
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 7
y1 = yindex // 7 % 7
y2 = yindex // 49
y4 = yindex
y5 = yindex % 49
tmp0 = tl.load(in_ptr0 + (x3 + 512 * y0 + 7680 * y1 + 57600 * y2),
xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x3 + 512 * y0 + 7680 * y1 + 57600 * y2),
xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (3840 + x3 + 512 * y0 + 7680 * y1 + 57600 * y2
), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (4096 + x3 + 512 * y0 + 7680 * y1 + 57600 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + 256 * y4), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y5 + 49 * x3 + 12544 * y2), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x0, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 121, 121), (43923, 14641, 121, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (4096, 50176), (50176, 1))
assert_size_stride(primals_11, (4096,), (1,))
assert_size_stride(primals_12, (133, 4096), (4096, 1))
assert_size_stride(primals_13, (133,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(96, 9)](primals_1, buf0, 96, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 121, 121), (43923, 1, 363, 3),
torch.float32)
triton_poi_fused_1[grid(12, 14641)](primals_3, buf1, 12, 14641,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(32768, 9)](primals_8, buf4, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 121, 121), (468512, 1, 3872, 32))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(1874048)](buf6, primals_2,
1874048, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf7 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32),
torch.float32)
buf8 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_6[grid(460800)](buf6, buf7,
buf8, 460800, XBLOCK=512, num_warps=8, num_stages=1)
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 60, 60), (230400, 1, 3840, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_7[grid(921600)](buf10, primals_5,
921600, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf11 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64),
torch.float32)
buf12 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(230400)](buf10,
buf11, buf12, 230400, XBLOCK=512, num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 30, 30), (115200, 1, 3840, 128))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_9[grid(460800)](buf14, primals_7,
460800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf15 = empty_strided_cuda((4, 128, 15, 15), (28800, 1, 1920, 128),
torch.float32)
buf16 = empty_strided_cuda((4, 128, 15, 15), (28800, 1, 1920, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(115200)](buf14,
buf15, buf16, 115200, XBLOCK=512, num_warps=8, num_stages=1)
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_11[grid(230400)](buf18, primals_9,
230400, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256),
torch.int8)
buf20 = empty_strided_cuda((4, 256, 7, 7), (12544, 49, 7, 1), torch
.float32)
triton_poi_fused_max_pool2d_with_indices_12[grid(196, 256)](buf18,
buf19, buf20, 196, 256, XBLOCK=256, YBLOCK=2, num_warps=4,
num_stages=1)
buf21 = empty_strided_cuda((1, 4096), (4096, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (1, 50176), (0, 1), 0),
reinterpret_tensor(primals_10, (50176, 4096), (1, 50176), 0),
out=buf21)
buf22 = buf21
del buf21
triton_poi_fused_relu_13[grid(4096)](buf22, primals_11, 4096,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf23 = empty_strided_cuda((1, 133), (133, 1), torch.float32)
extern_kernels.addmm(primals_13, buf22, reinterpret_tensor(
primals_12, (4096, 133), (1, 4096), 0), alpha=1, beta=1, out=buf23)
del primals_13
return (buf23, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10,
buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor
(buf20, (1, 50176), (50176, 1), 0), buf22, primals_12, primals_10)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=
3, padding=1)
self.max2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size
=3, padding=1)
self.max4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv5 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, padding=1)
self.max6 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv7 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, padding=1)
self.max8 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc9 = nn.Linear(256 * 14 * 14, 4096)
self.fc10 = nn.Linear(4096, 133)
self.dropout = nn.Dropout(0.25)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv3.weight
primals_5 = self.conv3.bias
primals_6 = self.conv5.weight
primals_7 = self.conv5.bias
primals_8 = self.conv7.weight
primals_9 = self.conv7.bias
primals_10 = self.fc9.weight
primals_11 = self.fc9.bias
primals_12 = self.fc10.weight
primals_13 = self.fc10.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
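# NetNew keeps Net's parameters but routes forward() through the compiled
# call(): weights and biases are passed as primals alongside the input, and
# only output[0] (the logits) is returned to the caller.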
| kawano8811/deep-learning-v2-pytorch | Net | false | 13,232 | ["MIT"] | 0 | b7c453728cb85edf3b30e0aeb66b3861747bc043 | https://github.com/kawano8811/deep-learning-v2-pytorch/tree/b7c453728cb85edf3b30e0aeb66b3861747bc043 |