python_code | repo_name | file_path
---|---|---|
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import numpy
"""Microbenchmarks for element-wise BatchGather operator."""
# Configs for C2 BatherGather operator
batch_gather_configs_short = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[8, 8, 1],
[256, 512, 1],
[512, 512, 1],
[8, 8, 2],
[256, 512, 2],
[512, 512, 2],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"]
)
batch_gather_configs_long = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
K=[1, 2],
device=['cpu', 'cuda'],
tags=["long"]
)
class BatchGatherBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, K, device):
self.input_one = self.tensor([M, N, K], device=device)
max_val = N
numpy.random.seed((1 << 32) - 1)
index_dim = numpy.random.randint(0, N)
self.index = self.feed_tensor(numpy.random.randint(0, max_val, index_dim), device=device)
self.output = self.tensor([M, index_dim, K], device=device)
self.set_module_name("batch_gather")
def forward(self):
op = core.CreateOperator("BatchGather", [self.input_one, self.index], self.output)
return op
op_bench_c2.generate_c2_test(
batch_gather_configs_long + batch_gather_configs_short, BatchGatherBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/c2/batch_gather_test.py |
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
# Configs for C2 add operator
add_long_configs = op_bench.cross_product_configs(
M=[8, 64, 128],
N=range(2, 10, 3),
K=[2 ** x for x in range(0, 3)],
dtype=["int", "float"],
tags=["long"]
)
add_short_configs = op_bench.config_list(
attrs=[
[8, 16, 32, "int"],
[16, 16, 64, "float"],
[64, 64, 128, "int"],
],
attr_names=["M", "N", "K", "dtype"],
tags=["short"],
)
class AddBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, K, dtype):
self.input_one = self.tensor([M, N, K], dtype)
self.input_two = self.tensor([M, N, K], dtype)
self.output = self.tensor([M, N, K], dtype)
self.set_module_name("add")
def forward(self):
op = core.CreateOperator(
"Add", [self.input_one, self.input_two], self.output, **self.args
)
return op
op_bench_c2.generate_c2_test(add_long_configs + add_short_configs, AddBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/c2/add_test.py |
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for QuantileOp operator."""
# Configs for C2 QuantileOp operator
quantile_op_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128], N=range(32, 128, 32), dtype=["float", "double"], tags=["long"]
)
quantile_op_short_configs = op_bench.config_list(
attrs=[
[16, 16, "float"],
[16, 16, "double"],
[64, 64, "float"],
[64, 64, "double"],
],
attr_names=["M", "N", "dtype"],
tags=["short"],
)
class QuantileOpBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, dtype):
self.data = [self.tensor([N], dtype) for _ in range(M)]
self.quantile = 0.3
self.output = self.tensor([1], dtype)
self.set_module_name("quantile_op")
def forward(self):
op = core.CreateOperator(
"Quantile", inputs=self.data, outputs=self.output, quantile=self.quantile
)
return op
op_bench_c2.generate_c2_test(
quantile_op_long_configs + quantile_op_short_configs, QuantileOpBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/c2/quantile_op_test.py |
| pytorch-master | benchmarks/operator_benchmark/c2/__init__.py |
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise ReplaceNaN operator."""
# Configs for C2 ReplaceNaN operator
replace_nan_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128], N=range(32, 128, 32), dtype=["float", "double"], tags=["long"]
)
replace_nan_short_configs = op_bench.config_list(
attrs=[
[16, 16, "float"],
[16, 16, "double"],
[64, 64, "float"],
[64, 64, "double"],
],
attr_names=["M", "N", "dtype"],
tags=["short"],
)
class ReplaceNaNBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, dtype):
self.input = self.tensor([M, N], dtype)
self.set_module_name("replace_nan")
def forward(self):
op = core.CreateOperator("ReplaceNaN", self.input, self.input, value=1.0)
return op
op_bench_c2.generate_c2_test(
replace_nan_long_configs + replace_nan_short_configs, ReplaceNaNBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/c2/replace_nan_test.py |
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for BatchBoxCox operator."""
# Configs for C2 BatchBoxCox operator
batch_box_cox_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128], N=range(32, 128, 32), dtype=["float", "double"], tags=["long"]
)
batch_box_cox_short_configs = op_bench.config_list(
attrs=[
[16, 16, "float"],
[16, 16, "double"],
[64, 64, "float"],
[64, 64, "double"],
],
attr_names=["M", "N", "dtype"],
tags=["short"],
)
class BatchBoxCoxBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, dtype):
self.data = self.tensor([M, N], dtype)
self.lambda1 = self.tensor([N], dtype)
self.lambda2 = self.tensor([N], dtype)
self.output = self.tensor([1, 1], dtype)
self.set_module_name("batch_box_cox")
def forward(self):
op = core.CreateOperator("BatchBoxCox", [self.data, self.lambda1, self.lambda2], self.output)
return op
op_bench_c2.generate_c2_test(
batch_box_cox_long_configs + batch_box_cox_short_configs, BatchBoxCoxBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/c2/batch_box_cox_test.py |
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core, dyndep
dyndep.InitOpsLibrary("@/caffe2/caffe2/fb/operators:clip_ranges_op")
"""Microbenchmarks for ClipRanges operator."""
# Configs for C2 ClipRanges operator
clip_ranges_long_configs = op_bench.cross_product_configs(
LENGTH=range(1, 100),
M=[1],
N=[2],
MAX_LENGTH=range(1, 100),
dtype=["int32"],
tags=["long"]
)
clip_ranges_short_configs = op_bench.config_list(
attrs=[
[6, 1, 2, 1, "int32"],
[7, 1, 2, 2, "int32"],
[8, 1, 2, 3, "int32"],
[9, 1, 2, 4, "int32"],
[10, 1, 2, 5, "int32"],
],
attr_names=["LENGTH", "M", "N", "MAX_LENGTH", "dtype"],
tags=["short"],
)
class ClipRangesBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, LENGTH, M, N, MAX_LENGTH, dtype):
self.input = self.tensor([LENGTH, M, N], dtype)
self.max_length = MAX_LENGTH
self.set_module_name("clip_ranges")
def forward(self):
op = core.CreateOperator("ClipRanges", self.input, self.input, max_length=self.max_length)
return op
op_bench_c2.generate_c2_test(
clip_ranges_long_configs + clip_ranges_short_configs, ClipRangesBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/c2/clip_ranges_test.py |
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for MatMul operator"""
# Configs for C2 Matmul operator
mm_long_configs = op_bench.cross_product_configs(
M=[8, 64, 128],
N=range(2, 10, 3),
K=[2 ** x for x in range(0, 3)],
trans_a=[True, False],
trans_b=[True, False],
tags=["long"]
)
mm_short_configs = op_bench.config_list(
attrs=[
[128, 128, 128, False, True],
[1024, 1024, 256, True, False],
[8192, 8192, 1024, True, False],
],
attr_names=["M", "N", "K", "trans_a", "trans_b"],
tags=["short"],
)
class MatMulBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, K, trans_a, trans_b):
self.input_one = self.tensor([N, M]) if trans_a else self.tensor([M, N])
self.input_two = self.tensor([K, N]) if trans_b else self.tensor([N, K])
self.args = {'trans_a': trans_a, 'trans_b': trans_b}
self.output = self.tensor([M, K])
self.set_module_name("matmul")
def forward(self):
op = core.CreateOperator(
"MatMul", [self.input_one, self.input_two], self.output, **self.args
)
return op
op_bench_c2.generate_c2_test(mm_long_configs + mm_short_configs, MatMulBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/c2/matmul_test.py |
import timeit
import torch
import torch.nn.functional as F
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._debug_set_fusion_group_inlining(False)
torch.set_num_threads(1)
def hardswish(x):
return x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
unary_ops = [
hardswish,
torch._C._nn.hardswish,
torch.sigmoid,
torch.reciprocal,
torch.neg,
torch.relu,
torch.isnan,
torch.log,
torch.log10,
torch.log1p,
torch.log2,
torch.exp,
torch.expm1,
torch.erf,
torch.erfc,
torch.cos,
torch.sin,
torch.tan,
torch.acos,
torch.asin,
torch.cosh,
torch.sinh,
torch.atan,
torch.tanh,
torch.sqrt,
torch.rsqrt,
torch.abs,
torch.ceil,
torch.floor,
torch.round,
torch.trunc,
torch.lgamma,
]
print("{:20s} {:>10s} {:>10s} {:>10s}".format("op", "eager", "nnc", "speedup"))
for op in unary_ops:
x = torch.rand((1024, 1024))
traced = torch.jit.trace(lambda x: op(x), (x,))
# Warmup.
warmup_iters = 8
for _ in range(warmup_iters):
op(x)
traced(x)
# Validate result.
torch.testing.assert_close(op(x), traced(x))
# Benchmark.
bench_iters = 100
teager = timeit.timeit(stmt="op(x)", globals=globals(), number=bench_iters)
tjit = timeit.timeit(stmt="traced(x)", globals=globals(), number=bench_iters)
print(f"{op.__name__:20s} {teager:10.3f} {tjit:10.3f} {teager/tjit:10.2f}")
def test_batch_norm():
op = F.batch_norm
print("{:20s} {:20s} {:>10s} {:>10s} {:>10s}".format("op", "shape", "eager", "nnc", "speedup"))
batch_norm_shapes = [
[1, 64, 112, 112],
[1, 256, 14, 14],
[1, 128, 28, 28],
[1, 64, 56, 56],
[1, 512, 7, 7],
[5, 64, 112, 112],
[5, 256, 14, 14],
[5, 128, 28, 28],
[5, 64, 56, 56],
[5, 512, 7, 7]]
for n, c, h, w in batch_norm_shapes:
x = torch.rand((n, c, h, w))
y = torch.rand((c))
z = torch.rand((c))
traced = torch.jit.trace(lambda x, y, z: op(x, y, z), (x, y, z))
# Warmup.
warmup_iters = 8
for _ in range(warmup_iters):
op(x, y, z)
traced(x, y, z)
# Validate result.
torch.testing.assert_close(op(x, y, z), traced(x, y, z))
# Benchmark.
bench_iters = 100
teager = timeit.timeit(stmt="op(x, y, z)", globals=locals(), number=bench_iters)
tjit = timeit.timeit(stmt="traced(x, y, z)", globals=locals(), number=bench_iters)
print(f"{op.__name__:20s} ({n:>3d}, {c:>3d}, {h:>3d}, {w:>3d}) {teager:10.3f} {tjit:10.3f} {teager/tjit:10.2f}")
test_batch_norm()
| pytorch-master | benchmarks/cpp/tensorexpr/bench_ops.py |
import argparse
import sys
import torch
import torch.utils.benchmark as benchmark_utils
try:
from benchmarks.fastrnns.factory import lstm_creator
except ImportError:
from caffe2.benchmarks.fastrnns.factory import lstm_creator
from torchvision.models import resnet50
def prepare_lstm_jit(bench_args):
model_def = lstm_creator(
script=True,
seqLength=bench_args.lstmSeqLength,
numLayers=bench_args.lstmNumLayers,
inputSize=bench_args.lstmInputSize,
hiddenSize=bench_args.lstmHiddenSize,
miniBatch=bench_args.lstmMiniBatch,
device='cpu')
return model_def.inputs, model_def.forward
def prepare_resnet50_jit(bench_args):
model = resnet50()
inputs = (torch.randn(32, 3, 224, 224),)
model = torch.jit.trace(model, inputs)
return inputs, model
MODELS = {
'resnet50_jit' : prepare_resnet50_jit,
'lstm_jit' : prepare_lstm_jit,
}
NUM_THREADS = [1, 2, 4, 8, 16, 32]
def run_bench(model_names, bench_args):
results = []
for model_name in model_names:
model_creator = MODELS[model_name]
inputs, model = model_creator(bench_args)
print("Benchmarking RecordFunction overhead for", model_name)
print("Running warmup...", end=" ")
sys.stdout.flush()
for _ in range(bench_args.warmup):
model(*inputs)
print("finished")
for num_threads in NUM_THREADS:
for with_rec_fn in [True, False]:
torch.autograd._enable_record_function(with_rec_fn)
torch.autograd._clear_callbacks()
if with_rec_fn:
torch.autograd._set_empty_test_observer(True, 0.0001)
print("Running {} RecordFunction, num threads {} ...".format(
"with" if with_rec_fn else "without", num_threads), end=" ")
sys.stdout.flush()
timer = benchmark_utils.Timer(
stmt="model(*inputs)",
globals={"model": model, "inputs": inputs},
description=model_name,
label="Record function overhead",
sub_label=f"with{'' if with_rec_fn else 'out'}_rec_fn, num_threads {num_threads}",
num_threads=num_threads)
result = timer.blocked_autorange(min_run_time=bench_args.timer_min_run_time)
print("finished")
print(result)
sys.stdout.flush()
results.append(result)
comparison = benchmark_utils.Compare(results)
comparison.trim_significant_figures()
comparison.highlight_warnings()
comparison.print()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Benchmark RecordFunction overhead for ResNet and LSTM models')
parser.add_argument('--models', nargs='*', default=['lstm_jit'],
help='What model to run: ' + str(MODELS.keys()))
parser.add_argument('--lstmSeqLength', default='100', type=int)
parser.add_argument('--lstmNumLayers', default='1', type=int)
parser.add_argument('--lstmInputSize', default='512', type=int)
parser.add_argument('--lstmHiddenSize', default='512', type=int)
parser.add_argument('--lstmMiniBatch', default='64', type=int)
parser.add_argument('--warmup', default='2', type=int)
parser.add_argument('--nloops', default='50', type=int)
parser.add_argument('--timer_min_run_time', default=120, type=int)
args = parser.parse_args()
models = args.models or MODELS.keys()
for model in models:
assert model in MODELS
run_bench(models, args)
| pytorch-master | benchmarks/record_function_benchmark/record_function_bench.py |
import pandas
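# "perf.csv" is assumed to be saved output from run_benchmarks.py in this
# directory, which emits CSV rows with the columns: fuser, device, operator,
# shape, time.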
df = pandas.read_csv("perf.csv")
ops = pandas.unique(df["operator"])
nops = len(ops)
pivot_op_shape = df.pivot_table(
values="time", index=["operator", "shape"], columns=["fuser"]
)
pivot_speedups = (pivot_op_shape.T / pivot_op_shape["eager"]).T
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (20, 100)
fig, axs = plt.subplots(nops)
plt.subplots_adjust(hspace=0.5)
for idx, op in enumerate(ops):
op_speedups = pivot_speedups.T[op].T
op_speedups.plot(ax=axs[idx], kind="bar", ylim=(0, 2), rot=45)
axs[idx].set_title(op)
axs[idx].set_xlabel("")
plt.savefig("perf.png")
| pytorch-master | benchmarks/fuser/plot_speedups.py |
import click
import sys
import time
import torch
import inspect
import itertools
torch.set_num_threads(1)
torch._C._debug_set_fusion_group_inlining(False)
def rand(*shape):
return torch.rand(*shape).mul(16).add(1)
# ------------------------------------------------------------------------------
# Shape test cases
# ------------------------------------------------------------------------------
def scalar():
return (rand(1), rand(1))
def small():
return (rand(32), rand(32))
def small_2d():
return (rand(1, 32), rand(1, 32))
def small_broadcast():
return (rand(4, 32), rand(32))
def medium():
return (rand(32, 12, 64, 64), rand(32, 12, 64, 64))
def medium_sliced():
return (rand(32, 12, 64, 64)[..., ::2], rand(32, 12, 64, 64)[..., ::2])
def medium_transpose():
return (
rand(32, 12, 64, 64).transpose(-1, -2),
rand(32, 12, 64, 64).transpose(-1, -2),
)
def medium2():
return (rand(32, 3, 224, 224), rand(32, 3, 224, 224))
def medium3d():
return (rand(16, 32, 64), rand(16, 32, 64))
def medium_channels_last():
return (
rand(32, 3, 224, 224).to(memory_format=torch.channels_last),
rand(32, 3, 224, 224).to(memory_format=torch.channels_last),
)
def medium_broadcast():
return (rand(32, 12, 64, 64), rand(64))
def medium_broadcast_channels_last():
return (rand(32, 3, 223, 223).to(memory_format=torch.channels_last), rand(3, 1, 1))
def large():
return (rand(8192, 8192), rand(8192, 8192))
def large_transpose():
return (rand(8192, 8192).transpose(0, 1), rand(8192, 8192).transpose(0, 1))
def large_channels_last():
return (
rand(32, 32, 256, 256).to(memory_format=torch.channels_last),
rand(32, 32, 256, 256).to(memory_format=torch.channels_last),
)
def broadcast_narrow_57611():
return (rand(1, 32, 32, 2), rand(1024, 1, 1, 2))
def large_broadcast_66816():
return (rand(64, 8, 256, 162), rand(256, 162))
# ------------------------------------------------------------------------------
# Operator test cases
# ------------------------------------------------------------------------------
def add(a, b):
return 3 * a + b
def sub(a, b):
return 3 * a - b
def mul(a, b):
return 3 * a * b
def div(a, b):
return 3 * a / b
def relu(a):
return (3 * a).relu()
def sigmoid(a):
return (3 * a).sigmoid()
def tanh(a):
return (3 * a).tanh()
def log(a):
return (3 * a).log()
def exp(a):
return (3 * a).exp()
def square(a):
return (3 * a) ** 2
def fma(a, b):
return a * b + b
def mul_mul_add_66816(a, b, c):
return (a * b) + (a * c)
def hardswish_int(a):
return a * (a + 3).clamp(0, 6) / 6
def hardswish(a):
return a * (a + 3).clamp(0.0, 6.0) / 6
def native_hardswish(a):
return torch._C._nn.hardswish(a * 3)
def softplus(a):
return (a * 1.0).exp().log1p() / 1.0
def mish(a):
return a * ((a * 1.0).exp().log1p() / 1.0).tanh()
SHAPES = [
scalar,
small,
small_2d,
small_broadcast,
medium,
medium2,
medium3d,
medium_sliced,
medium_transpose,
medium_channels_last,
medium_broadcast,
medium_broadcast_channels_last,
large,
large_transpose,
large_channels_last,
broadcast_narrow_57611,
large_broadcast_66816,
]
OPERATORS = [
add,
sub,
mul,
div,
relu,
sigmoid,
tanh,
log,
exp,
square,
fma,
mul_mul_add_66816,
hardswish_int,
hardswish,
native_hardswish,
softplus,
mish,
]
def time_cpu(fn, args, iters):
s = time.perf_counter()
for _ in range(iters):
fn(*args)
e = time.perf_counter()
return e - s
def time_cuda(fn, args, iters):
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for _ in range(iters):
fn(*args)
end.record()
torch.cuda.synchronize()
return start.elapsed_time(end) / 1e3
def benchmark_with_timer(fn, args, timer):
timer(fn, args, 3)
calibration = timer(fn, args, 1)
iters = int(1.0 / calibration)
return timer(fn, args, iters) / iters
def benchmark(fn, args):
timer = time_cpu if args[0].device.type == "cpu" else time_cuda
return benchmark_with_timer(fn, args, timer)
def micros(s):
return f"{s * 1e6:.1f}"
def with_nvfuser():
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
def with_nnc():
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
def with_legacy():
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
@click.command()
@click.option("--operators", default=None)
@click.option("--shapes", default=None)
def run_benchmarks(operators, shapes):
if operators is None:
operators = OPERATORS
else:
operators = [globals()[k] for k in operators.split(",")]
if shapes is None:
shapes = SHAPES
else:
shapes = [globals()[k] for k in shapes.split(",")]
print("fuser,device,operator,shape,time")
results = []
for shape, operator in itertools.product(shapes, operators):
nargs = len(inspect.signature(operator).parameters)
args = shape()
if nargs > len(args):
args = list(args)
args += [args[-1]] * (nargs - len(args))
args = args[:nargs]
args = [arg.to("cuda") for arg in args]
result = benchmark(operator, args)
print(
",".join(
[
"eager",
args[0].device.type,
operator.__name__,
shape.__name__,
micros(result),
]
)
)
def bench(name):
nnc_op = torch.jit.trace(operator, args)
result = benchmark(nnc_op, args)
print(
",".join(
[
name,
args[0].device.type,
operator.__name__,
shape.__name__,
micros(result),
]
)
)
sys.stdout.flush()
with_nnc()
bench("nnc")
with_nvfuser()
bench("nvfuser")
with_legacy()
bench("legacy")
if __name__ == "__main__":
run_benchmarks()
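# Example invocation (a sketch -- operator/shape names must match the functions
# defined above, and the script moves every input to CUDA, so a GPU is required
# as written):
#   python run_benchmarks.py --operators add,mul --shapes small,medium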
| pytorch-master | benchmarks/fuser/run_benchmarks.py |
import argparse
import sys
import torch
from .utils import gen_sparse_csr, gen_sparse_coo, gen_sparse_coo_and_csr, Event
def test_sparse_csr(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
times = []
for _ in range(test_count):
start_timer.record()
csr.matmul(vector)
stop_timer.record()
times.append(start_timer.elapsed_time(stop_timer))
return sum(times) / len(times)
def test_sparse_coo(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
coo = gen_sparse_coo((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
times = []
for _ in range(test_count):
start_timer.record()
coo.matmul(vector)
stop_timer.record()
times.append(start_timer.elapsed_time(stop_timer))
return sum(times) / len(times)
def test_sparse_coo_and_csr(m, nnz, test_count):
start = Event(enable_timing=True)
stop = Event(enable_timing=True)
coo, csr = gen_sparse_coo_and_csr((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
times = []
for _ in range(test_count):
start.record()
coo.matmul(vector)
stop.record()
times.append(start.elapsed_time(stop))
coo_mean_time = sum(times) / len(times)
times = []
for _ in range(test_count):
start.record()
csr.matmul(vector)
stop.record()
times.append(start.elapsed_time(stop))
csr_mean_time = sum(times) / len(times)
return coo_mean_time, csr_mean_time
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="SpMV")
parser.add_argument("--format", default='csr', type=str)
parser.add_argument("--m", default='1000', type=int)
parser.add_argument("--nnz_ratio", default='0.1', type=float)
parser.add_argument("--outfile", default='stdout', type=str)
parser.add_argument("--test_count", default='10', type=int)
args = parser.parse_args()
if args.outfile == 'stdout':
outfile = sys.stdout
elif args.outfile == 'stderr':
outfile = sys.stderr
else:
outfile = open(args.outfile, "a")
test_count = args.test_count
m = args.m
nnz_ratio = args.nnz_ratio
nnz = int(nnz_ratio * m * m)
if args.format == 'csr':
time = test_sparse_csr(m, nnz, test_count)
elif args.format == 'coo':
time = test_sparse_coo(m, nnz, test_count)
elif args.format == 'both':
time_coo, time_csr = test_sparse_coo_and_csr(m, nnz, test_count)
if args.format != 'both':
print("format=", args.format, " nnz_ratio=", nnz_ratio, " m=", m,
" time=", time, file=outfile)
else:
print("format=coo", " nnz_ratio=", nnz_ratio, " m=", m,
" time=", time_coo, file=outfile)
print("format=csr", " nnz_ratio=", nnz_ratio, " m=", m,
" time=", time_csr, file=outfile)
| pytorch-master | benchmarks/sparse/spmv.py |
if __name__ == "__main__":
pass
| pytorch-master | benchmarks/sparse/__init__.py |
import torch
import functools
import random
import operator
import numpy as np
import time
# shim for torch.cuda.Event when running on cpu
class Event(object):
def __init__(self, enable_timing):
pass
def record(self):
self.time = time.perf_counter()
def elapsed_time(self, end_event):
assert isinstance(end_event, Event)
return end_event.time - self.time
def gen_sparse_csr(shape, nnz):
fill_value = 0
total_values = functools.reduce(operator.mul, shape, 1)
dense = np.random.randn(total_values)
fills = random.sample(list(range(total_values)), total_values - nnz)
for f in fills:
dense[f] = fill_value
dense = torch.from_numpy(dense.reshape(shape))
return dense.to_sparse_csr()
def gen_sparse_coo(shape, nnz):
dense = np.random.randn(*shape)
values = []
indices = [[], []]
for n in range(nnz):
row = random.randint(0, shape[0] - 1)
col = random.randint(0, shape[1] - 1)
indices[0].append(row)
indices[1].append(col)
values.append(dense[row, col])
return torch.sparse_coo_tensor(indices, values, size=shape)
def gen_sparse_coo_and_csr(shape, nnz):
total_values = functools.reduce(operator.mul, shape, 1)
dense = np.random.randn(total_values)
fills = random.sample(list(range(total_values)), total_values - nnz)
for f in fills:
dense[f] = 0
dense = torch.from_numpy(dense.reshape(shape))
return dense.to_sparse(), dense.to_sparse_csr()
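# A minimal, self-contained demo of the helpers above; it only runs when this
# module is executed directly (e.g. `python utils.py`). The 64x64 shape and
# nnz=128 are illustrative choices, not values used by the benchmark scripts.
if __name__ == "__main__":
    start, stop = Event(enable_timing=True), Event(enable_timing=True)
    coo, csr = gen_sparse_coo_and_csr((64, 64), 128)
    vector = torch.randn(64, dtype=torch.double)
    start.record()
    csr.matmul(vector)
    stop.record()
    print("CSR matmul of a 64x64 matrix took", start.elapsed_time(stop), "seconds")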
| pytorch-master | benchmarks/sparse/utils.py |
import argparse
import sys
import torch
from utils import gen_sparse_csr, gen_sparse_coo, Event
def test_sparse_csr(m, n, k, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, k), nnz)
mat = torch.randn(k, n, dtype=torch.double)
times = []
for _ in range(test_count):
start_timer.record()
csr.matmul(mat)
stop_timer.record()
times.append(start_timer.elapsed_time(stop_timer))
return sum(times) / len(times)
def test_sparse_coo(m, n, k, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
coo = gen_sparse_coo((m, k), nnz)
mat = torch.randn(k, n, dtype=torch.double)
times = []
for _ in range(test_count):
start_timer.record()
coo.matmul(mat)
stop_timer.record()
times.append(start_timer.elapsed_time(stop_timer))
return sum(times) / len(times)
def test_sparse_coo_and_csr(m, n, k, nnz, test_count):
start = Event(enable_timing=True)
stop = Event(enable_timing=True)
coo, csr = gen_sparse_coo_and_csr((m, k), nnz)
mat = torch.randn((k, n), dtype=torch.double)
times = []
for _ in range(test_count):
start.record()
coo.matmul(mat)
stop.record()
times.append(start.elapsed_time(stop))
coo_mean_time = sum(times) / len(times)
times = []
for _ in range(test_count):
start.record()
csr.matmul(mat)
stop.record()
times.append(start.elapsed_time(stop))
csr_mean_time = sum(times) / len(times)
return coo_mean_time, csr_mean_time
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="SpMM")
parser.add_argument("--format", default='csr', type=str)
parser.add_argument("--m", default='1000', type=int)
parser.add_argument("--n", default='1000', type=int)
parser.add_argument("--k", default='1000', type=int)
parser.add_argument("--nnz_ratio", default='0.1', type=float)
parser.add_argument("--outfile", default='stdout', type=str)
parser.add_argument("--test_count", default='10', type=int)
args = parser.parse_args()
if args.outfile == 'stdout':
outfile = sys.stdout
elif args.outfile == 'stderr':
outfile = sys.stderr
else:
outfile = open(args.outfile, "a")
test_count = args.test_count
m = args.m
n = args.n
k = args.k
nnz_ratio = args.nnz_ratio
nnz = int(nnz_ratio * m * k)
if args.format == 'csr':
time = test_sparse_csr(m, n, k, nnz, test_count)
elif args.format == 'coo':
time = test_sparse_coo(m, n, k, nnz, test_count)
elif args.format == 'both':
time_coo, time_csr = test_sparse_coo_and_csr(m, n, k, nnz, test_count)
if args.format == 'both':
print("format=coo", " nnz_ratio=", nnz_ratio, " m=", m, " n=", n, " k=", k, " time=", time_coo, file=outfile)
print("format=csr", " nnz_ratio=", nnz_ratio, " m=", m, " n=", n, " k=", k, " time=", time_csr, file=outfile)
else:
print("format=", args.format, " nnz_ratio=", nnz_ratio, " m=", m, " n=", n, " k=", k, " time=", time,
file=outfile)
| pytorch-master | benchmarks/sparse/spmm.py |
if __name__ == "__main__":
pass
| pytorch-master | benchmarks/sparse/dlmc/__init__.py |
# Sparse benchmarks
# This benchmark is for sparse matmul performance test.
# They exist for comparing the performance of sparse matrix routines
# `sparse @ vector`, `sparse @ sparse` and `sparse @ dense` with different backends (CPU/CUDA)
# and with other frameworks such as scipy.
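# Example invocation (a sketch -- the dataset path is a placeholder; run as a
# module so the relative import of .utils resolves, e.g. from benchmarks/sparse):
#   python -m dlmc.matmul_bench --path /path/to/dlmc --dataset magnitude_pruning \
#       --operation sparse@sparse --hidden_size 2048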
import sys
import argparse
import torch
import torch.utils.benchmark as benchmark_utils
from .utils import load_dlmc_dataset
from scipy.sparse import isspmatrix
import os
def scipy_matmul(mat1, mat2):
if isspmatrix(mat1) and isspmatrix(mat2):
return mat1.dot(mat2).tocoo()
return mat1.dot(mat2)
def matmul_backward(a_dense, b_dense, grad_output):
r1 = a_dense.matmul(b_dense)
r1.backward(grad_output)
def sparse_matmul_backward(a, b, grad_output):
c = torch.sparse.mm(a, b)
c.backward(grad_output)
OPS_MAP = {
"sparse@sparse": "torch.sparse.mm",
"sparse@dense": "torch.matmul",
"sparse@vector": "torch.matmul",
}
# also get the arguments as input from the user using `argparse`
def parse_args():
parser = argparse.ArgumentParser(description='matmul benchmark')
parser.add_argument('--path', type=str, help='DLMC dataset path')
parser.add_argument('--dataset', type=str, default='magnitude_pruning')
parser.add_argument('--hidden_size', default=2048, type=int)
parser.add_argument('--backward_test', action="store_true")
parser.add_argument('--operation', type=str, help="|".join(OPS_MAP.keys()), default=next(iter(OPS_MAP)))
parser.add_argument('--with_cuda', action='store_true')
parser.add_argument('--timer_min_run_time', default=1, type=float)
return parser
def get_tasks(op, backward_test, device):
def filter_ops(operation):
if backward_test:
test_name = device + ":matmul-backward"
return [
(test_name, device, "torch:" + operation.replace("sparse", "dense"),
"matmul_backward(dx, dy, grad_output)"),
(test_name, device, "torch:" + operation, "sparse_matmul_backward(x, y, sparse_grad_output)")
]
else:
test_name = device + ":matmul-forward"
return list(filter(None, [
(test_name, device, "torch:" + operation.replace("sparse", "dense"),
"{}(dx, dy)".format(OPS_MAP[operation])),
(test_name, device, "torch:" + operation, "{}(x, y)".format(OPS_MAP[operation])),
(test_name, device, "scipy:" + operation, "scipy_matmul(sx, sy)") if device == "cpu" else None
]))
all_operations = {
"sparse@sparse": filter_ops("sparse@sparse"),
"sparse@dense": filter_ops("sparse@dense"),
"sparse@vector": filter_ops("sparse@vector"),
}
return all_operations[op]
if __name__ == '__main__':
parser = parse_args()
args = parser.parse_args()
if args.with_cuda and not torch.cuda.is_available():
raise RuntimeError("No CUDA available")
dataset_path = args.path
dataset_name = args.dataset
dataset_path = os.path.join(dataset_path, dataset_name)
device = 'cuda' if args.with_cuda else 'cpu'
tasks = get_tasks(args.operation, args.backward_test, device)
repeats = 3
timers = [
benchmark_utils.Timer(
stmt=stmt,
globals={
"scipy_matmul": scipy_matmul,
"matmul_backward": matmul_backward,
"sparse_matmul_backward": sparse_matmul_backward,
**variables
},
label=label,
sub_label=sub_label,
description=f"{sparsity}",
env=device,
)
for sparsity in [0.5, 0.7, 0.8, 0.9, 0.95, 0.98]
for label, device, sub_label, stmt in tasks
for variables in
load_dlmc_dataset(dataset_path, args.operation, args.hidden_size, sparsity, device, args.backward_test)
]
measurements = []
for i, timer in enumerate(timers * repeats):
m = timer.blocked_autorange(min_run_time=args.timer_min_run_time)
m.metadata = {
"device": 'cuda' if m.task_spec.env.find("cuda") >= 0 else 'cpu'
}
measurements.append(m)
print(f"\r{i + 1} / {len(timers) * repeats}", end="")
sys.stdout.flush()
print()
comparison = benchmark_utils.Compare(measurements)
print("== Results " + "=" * 80 + "\n" + "/" * 95 + "\n")
comparison.print()
| pytorch-master | benchmarks/sparse/dlmc/matmul_bench.py |
import torch
from pathlib import Path
from scipy import sparse
import math
def to_coo_scipy(x):
indices_1 = x._indices().numpy()
values_1 = x._values().numpy()
return sparse.coo_matrix((values_1, (indices_1[0], indices_1[1])),
shape=x.shape)
def sparse_grad_output(a, b):
c = torch.sparse.mm(a, b)
if c.is_sparse:
c2 = torch.rand_like(c.to_dense())
return c2.sparse_mask(c.coalesce())
else:
return torch.rand_like(c)
def read_matrix_params(path):
with open(path, 'r') as file:
line = file.readline()
nrows, ncols, nnz = map(lambda el: int(el), line.split(', '))
return (nrows, ncols), nnz
def csr_to_coo(indices, indptr, shape):
n_rows, n_cols = shape
cols = indices
rows = [0] * len(cols)
for i in range(n_rows):
for j in range(indptr[i], indptr[i + 1]):
rows[j] = i
return torch.tensor([rows, cols], dtype=torch.long)
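# Worked example (illustrative values): for a 2x3 CSR matrix with
# indptr = [0, 2, 3] and indices = [0, 2, 1],
# csr_to_coo([0, 2, 1], [0, 2, 3], (2, 3)) returns the COO index tensor
# [[0, 0, 1], [0, 2, 1]] (row indices on top, column indices below).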
def load_sparse_matrix(path, device):
with open(path, 'r') as file:
nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
index_pointers = map(lambda el: int(el), file.readline().split())
indices = map(lambda el: int(el), file.readline().split())
index_pointers = list(index_pointers)
indices = list(indices)
data = torch.randn(nnz, dtype=torch.double)
shape = (nrows, ncols)
return torch.sparse_coo_tensor(csr_to_coo(indices, index_pointers, shape), data, shape, device=device)
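# The readers above and below assume the DLMC ".smtx" text layout exactly as it
# is parsed here:
#   line 1: "nrows, ncols, nnz"
#   line 2: whitespace-separated CSR row pointers (nrows + 1 entries)
#   line 3: whitespace-separated column indices (nnz entries)
# The files carry no values, so random data is substituted when building tensors.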
def gen_vector(path, device):
with open(path, 'r') as file:
nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
index_pointers = map(lambda el: int(el), file.readline().split())
indices = map(lambda el: int(el), file.readline().split())
return torch.randn(nrows, dtype=torch.double, device=device)
def gen_matrix(path, device):
with open(path, 'r') as file:
nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
index_pointers = map(lambda el: int(el), file.readline().split())
indices = map(lambda el: int(el), file.readline().split())
return torch.randn(nrows, ncols, dtype=torch.double, device=device)
def load_spmv_dataset(dataset_path, hidden_size, sparsity, device, n_limit=math.inf):
"""load_spmv_dataset loads a DLMC dataset for a sparse matrix-vector multiplication (SPMV) performance test.
Args:
dataset_path:
path of the dataset from DLMC collection.
hidden_size
This value allows tensors of varying sizes.
sparsity:
This value allows tensors of varying sparsities.
device:
Whether to place the Tensor on a GPU or CPU.
n_limit:
This value allows a dataset with some limit size.
"""
current_folder_path = f"{dataset_path}/{sparsity}"
path = Path(current_folder_path)
files = path.glob('**/*.smtx')
print(dataset_path, hidden_size, sparsity)
index = 0
x_files, y_files = [], []
for f in files:
if index >= n_limit:
break
print('.', end='')
size, nnz = read_matrix_params(f.as_posix())
if size[1] == hidden_size:
x_files.append(f.as_posix())
if size[0] == hidden_size:
y_files.append(f.as_posix())
index += 1
print()
for fx, fy in zip(x_files, y_files):
x = load_sparse_matrix(fx, device)
y = gen_vector(fy, device)
yield (x, y)
def load_spmm_dataset(dataset_path, hidden_size, sparsity, spmm_type, device, n_limit=math.inf):
"""load_spmm_dataset loads a DLMC dataset for a sparse matrix-matrix multiplication (SPMM) performance test.
Args:
dataset_path:
path of the dataset from DLMC collection.
hidden_size
This value allows tensors of varying sizes.
sparsity:
This value allows tensors of varying sparsities.
spmm_type:
This value allows tensors for `sparse@sparse` or `sparse@dense` operations.
device:
Whether to place the Tensor on a GPU or CPU.
n_limit:
This value allows a dataset with some limit size.
"""
current_folder_path = f"{dataset_path}/{sparsity}"
path = Path(current_folder_path)
files = path.glob('**/*.smtx')
print(dataset_path, hidden_size, sparsity)
index = 0
x_files, y_files = [], []
for f in files:
if index >= n_limit:
break
print('.', end='')
size, nnz = read_matrix_params(f.as_posix())
if size[1] == hidden_size:
x_files.append(f.as_posix())
if size[0] == hidden_size:
y_files.append(f.as_posix())
index += 1
print()
for fx, fy in zip(x_files, y_files):
x = load_sparse_matrix(fx, device)
y = gen_matrix(fy, device) if spmm_type == 'sparse@dense' else load_sparse_matrix(fy, device)
yield (x, y)
def load_dlmc_dataset(dataset_path, operation, hidden_size, sparsity, device, requires_grad, n_limit=math.inf):
"""load_dlmc_dataset loads a DLMC dataset for a matmul performance test.
Args:
dataset_path:
path of the dataset from DLMC collection.
operation:
This value allows tensors for `sparse@sparse`|`sparse@dense`|`sparse@vector` operations.
hidden_size
This value allows tensors of varying sizes.
sparsity:
This value allows tensors of varying sparsities.
device:
Whether to place the Tensor on a GPU or CPU.
requires_grad:
Loads the dataset for backward test.
n_limit:
This value allows a dataset with some limit size.
"""
if operation == 'sparse@sparse' or operation == "sparse@dense":
collection = load_spmm_dataset(dataset_path, hidden_size, sparsity, operation, device, n_limit)
elif operation == 'sparse@vector':
collection = load_spmv_dataset(dataset_path, hidden_size, sparsity, device, n_limit)
scipy_vars = {}
backward_vars = {}
for x, y in collection:
if device == 'cpu':
scipy_vars = {
"sx": to_coo_scipy(x) if x.is_sparse else x.numpy(),
"sy": to_coo_scipy(y) if y.is_sparse else y.numpy(),
}
if not requires_grad:
dx = x.to_dense() if x.is_sparse else x
dy = y.to_dense() if y.is_sparse else y
else:
c = sparse_grad_output(x, y)
backward_vars = {
"sparse_grad_output": c,
"grad_output": c.to_dense() if c.is_sparse else c,
}
x.requires_grad_(True)
y.requires_grad_(True)
dx = x.to_dense().detach() if x.is_sparse else x.clone().detach()
dy = y.to_dense().detach() if y.is_sparse else y.clone().detach()
dx.requires_grad_(True)
dy.requires_grad_(True)
yield {
"x": x,
"y": y,
"dx": dx,
"dy": dy,
**scipy_vars,
**backward_vars
}
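# A small self-contained smoke test; it only runs when this module is executed
# directly (e.g. `python utils.py`). It writes one tiny synthetic .smtx file in
# the layout described above and pulls a single sample through
# load_dlmc_dataset. The 8x8 shape and the "0.9" sparsity folder name are
# illustrative assumptions, not values from the real DLMC collection.
if __name__ == "__main__":
    import os
    import tempfile
    with tempfile.TemporaryDirectory() as root:
        os.makedirs(f"{root}/0.9")
        with open(f"{root}/0.9/tiny.smtx", "w") as f:
            f.write("8, 8, 8\n")                                 # nrows, ncols, nnz
            f.write(" ".join(str(i) for i in range(9)) + "\n")   # CSR row pointers
            f.write(" ".join(str(i) for i in range(8)) + "\n")   # column indices
        for sample in load_dlmc_dataset(root, "sparse@vector", 8, 0.9, "cpu", False):
            print("x:", sample["x"].shape, "y:", sample["y"].shape,
                  "dense product:", torch.matmul(sample["dx"], sample["dy"]).shape)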
| pytorch-master | benchmarks/sparse/dlmc/utils.py |
import argparse
import sys
import timeit
import torch
from torch.utils.benchmark import Timer
PARALLEL_TASKS_NUM = 4
INTERNAL_ITER = None
def loop_workload(x):
for i in range(INTERNAL_ITER):
x = torch.mm(x, x)
return x
def parallel_workload(x):
def parallel_task(x):
for i in range(int(INTERNAL_ITER / PARALLEL_TASKS_NUM)):
x = torch.mm(x, x)
return x
futs = []
for i in range(PARALLEL_TASKS_NUM):
futs.append(torch.jit._fork(parallel_task, x))
for i in range(PARALLEL_TASKS_NUM):
torch.jit._wait(futs[i])
return x
if __name__ == '__main__':
torch._C._set_graph_executor_optimize(False)
parser = argparse.ArgumentParser(
description='Profiler benchmark')
parser.add_argument('--with_cuda', action='store_true')
parser.add_argument('--with_stack', action='store_true')
parser.add_argument('--use_script', action='store_true')
parser.add_argument('--use_kineto', action='store_true')
parser.add_argument('--profiling_tensor_size', default=1, type=int)
parser.add_argument('--workload', default='loop', type=str)
parser.add_argument('--internal_iter', default=256, type=int)
parser.add_argument('--timer_min_run_time', default=10, type=int)
parser.add_argument('--cuda_only', action='store_true')
args = parser.parse_args()
if args.with_cuda and not torch.cuda.is_available():
print("No CUDA available")
sys.exit()
print("Payload: {}, {} iterations; timer min. runtime = {}\n".format(
args.workload, args.internal_iter, args.timer_min_run_time))
INTERNAL_ITER = args.internal_iter
for profiling_enabled in [False, True]:
print("Profiling {}, tensor size {}x{}, use cuda: {}, use kineto: {}, with stacks: {}, use script: {}".format(
"enabled" if profiling_enabled else "disabled",
args.profiling_tensor_size,
args.profiling_tensor_size,
args.with_cuda,
args.use_kineto,
args.with_stack,
args.use_script))
input_x = torch.rand(
args.profiling_tensor_size,
args.profiling_tensor_size)
if args.with_cuda:
input_x = input_x.cuda()
workload = None
assert args.workload in ["loop", "parallel"]
if args.workload == "loop":
workload = loop_workload
else:
workload = parallel_workload
if args.use_script:
traced_workload = torch.jit.trace(workload, (input_x,))
workload = traced_workload
if profiling_enabled:
def payload():
x = None
with torch.autograd.profiler.profile(
use_cuda=args.with_cuda,
with_stack=args.with_stack,
use_kineto=args.use_kineto,
use_cpu=not args.cuda_only) as prof:
x = workload(input_x)
return x
else:
def payload():
return workload(input_x)
t = Timer(
"payload()",
globals={"payload": payload},
timer=timeit.default_timer,
).blocked_autorange(min_run_time=args.timer_min_run_time)
print(t)
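# Example invocation (every flag shown is defined in the argparse setup above):
#   python profiler_bench.py --workload parallel --internal_iter 256 --use_kineto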
| pytorch-master | benchmarks/profiler_benchmark/profiler_bench.py |
import torch
import torchvision.models as models
import torch.autograd.profiler as profiler
for with_cuda in [False, True]:
model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)
sort_key = "self_cpu_memory_usage"
if with_cuda and torch.cuda.is_available():
model = model.cuda()
inputs = inputs.cuda()
sort_key = "self_cuda_memory_usage"
print("Profiling CUDA Resnet model")
else:
print("Profiling CPU Resnet model")
with profiler.profile(profile_memory=True, record_shapes=True) as prof:
with profiler.record_function("root"):
model(inputs)
print(prof.key_averages(group_by_input_shape=True).table(sort_by=sort_key, row_limit=-1))
| pytorch-master | benchmarks/profiler_benchmark/resnet_memory_profiler.py |
"""
This is a script for PyTorch Android custom selective build test. It prepares
MobileNetV2 TorchScript model, and dumps root ops used by the model for custom
build script to create a tailored build which only contains these used ops.
"""
import torch
import torchvision
import yaml
# Download and trace the model.
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
# TODO: create script model with `torch.jit.script`
traced_script_module = torch.jit.trace(model, example)
# Save traced TorchScript model.
traced_script_module.save("MobileNetV2.pt")
# Dump root ops used by the model (for custom build optimization).
ops = torch.jit.export_opnames(traced_script_module)
with open('MobileNetV2.yaml', 'w') as output:
yaml.dump(ops, output)
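# Optional sanity check (a sketch, not part of the original asset pipeline):
# reload both artifacts and report how many root ops were recorded.
reloaded = torch.jit.load("MobileNetV2.pt")
with open('MobileNetV2.yaml') as f:
    dumped_ops = yaml.safe_load(f)
print("Dumped {} root ops; reloaded model exports {} ops.".format(
    len(dumped_ops), len(torch.jit.export_opnames(reloaded))))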
| pytorch-master | android/test_app/make_assets_custom.py |
import torch
import torchvision
print(torch.version.__version__)
resnet18 = torchvision.models.resnet18(pretrained=True)
resnet18.eval()
resnet18_traced = torch.jit.trace(resnet18, torch.rand(1, 3, 224, 224)).save("app/src/main/assets/resnet18.pt")
resnet50 = torchvision.models.resnet50(pretrained=True)
resnet50.eval()
torch.jit.trace(resnet50, torch.rand(1, 3, 224, 224)).save("app/src/main/assets/resnet50.pt")
mobilenet2q = torchvision.models.quantization.mobilenet_v2(pretrained=True, quantize=True)
mobilenet2q.eval()
torch.jit.trace(mobilenet2q, torch.rand(1, 3, 224, 224)).save("app/src/main/assets/mobilenet2q.pt")
| pytorch-master | android/test_app/make_assets.py |
import torch
from torch import Tensor
from typing import Dict, List, Tuple, Optional
OUTPUT_DIR = "src/androidTest/assets/"
def scriptAndSave(module, fileName):
print('-' * 80)
script_module = torch.jit.script(module)
print(script_module.graph)
outputFileName = OUTPUT_DIR + fileName
# note that the lite interpreter model can also be used in full JIT
script_module._save_for_lite_interpreter(outputFileName)
print("Saved to " + outputFileName)
print('=' * 80)
class Test(torch.jit.ScriptModule):
def __init__(self):
super(Test, self).__init__()
@torch.jit.script_method
def forward(self, input):
return None
@torch.jit.script_method
def eqBool(self, input: bool) -> bool:
return input
@torch.jit.script_method
def eqInt(self, input: int) -> int:
return input
@torch.jit.script_method
def eqFloat(self, input: float) -> float:
return input
@torch.jit.script_method
def eqStr(self, input: str) -> str:
return input
@torch.jit.script_method
def eqTensor(self, input: Tensor) -> Tensor:
return input
@torch.jit.script_method
def eqDictStrKeyIntValue(self, input: Dict[str, int]) -> Dict[str, int]:
return input
@torch.jit.script_method
def eqDictIntKeyIntValue(self, input: Dict[int, int]) -> Dict[int, int]:
return input
@torch.jit.script_method
def eqDictFloatKeyIntValue(self, input: Dict[float, int]) -> Dict[float, int]:
return input
@torch.jit.script_method
def listIntSumReturnTuple(self, input: List[int]) -> Tuple[List[int], int]:
sum = 0
for x in input:
sum += x
return (input, sum)
@torch.jit.script_method
def listBoolConjunction(self, input: List[bool]) -> bool:
res = True
for x in input:
res = res and x
return res
@torch.jit.script_method
def listBoolDisjunction(self, input: List[bool]) -> bool:
res = False
for x in input:
res = res or x
return res
@torch.jit.script_method
def tupleIntSumReturnTuple(self, input: Tuple[int, int, int]) -> Tuple[Tuple[int, int, int], int]:
sum = 0
for x in input:
sum += x
return (input, sum)
@torch.jit.script_method
def optionalIntIsNone(self, input: Optional[int]) -> bool:
return input is None
@torch.jit.script_method
def intEq0None(self, input: int) -> Optional[int]:
if input == 0:
return None
return input
@torch.jit.script_method
def str3Concat(self, input: str) -> str:
return input + input + input
@torch.jit.script_method
def newEmptyShapeWithItem(self, input):
return torch.tensor([int(input.item())])[0]
@torch.jit.script_method
def testAliasWithOffset(self) -> List[Tensor]:
x = torch.tensor([100, 200])
a = [x[0], x[1]]
return a
@torch.jit.script_method
def testNonContiguous(self):
x = torch.tensor([100, 200, 300])[::2]
assert not x.is_contiguous()
assert x[0] == 100
assert x[1] == 300
return x
@torch.jit.script_method
def conv2d(self, x: Tensor, w: Tensor, toChannelsLast: bool) -> Tensor:
r = torch.nn.functional.conv2d(x, w)
if (toChannelsLast):
r = r.contiguous(memory_format=torch.channels_last)
else:
r = r.contiguous()
return r
@torch.jit.script_method
def contiguous(self, x: Tensor) -> Tensor:
return x.contiguous()
@torch.jit.script_method
def contiguousChannelsLast(self, x: Tensor) -> Tensor:
return x.contiguous(memory_format=torch.channels_last)
@torch.jit.script_method
def contiguousChannelsLast3d(self, x: Tensor) -> Tensor:
return x.contiguous(memory_format=torch.channels_last_3d)
scriptAndSave(Test(), "test.pt")
| pytorch-master | android/pytorch_android/generate_test_torchscripts.py |
## @package diagnose_protobuf
# Module scripts.diagnose_protobuf
"""Diagnoses the current protobuf situation.
Protocol buffer needs to be properly installed for Caffe2 to work, and
sometimes it is rather tricky. Specifically, we will need to have a
consistent version between C++ and python simultaneously. This is a
convenience script for one to quickly check if this is so on one's local
machine.
Usage:
[set your environmental variables like PATH and PYTHONPATH]
python scripts/diagnose_protobuf.py
"""
import os
import re
from subprocess import Popen, PIPE
# Get python protobuf version.
try:
import google.protobuf
python_version = google.protobuf.__version__
python_protobuf_installed = True
except ImportError:
print("DEBUG: cannot find python protobuf install.")
python_protobuf_installed = False
if os.name == 'nt':
protoc_name = 'protoc.exe'
else:
protoc_name = 'protoc'
try:
p = Popen([protoc_name, '--version'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
out, err = out.decode('utf-8'), err.decode('utf-8')
except OSError:
print('DEBUG: did not find protoc binary.')
native_protobuf_installed = False
else:
if p.returncode:
print('DEBUG: protoc returned a non-zero return code.')
print('DEBUG: out: ' + out)
print('DEBUG: err: ' + err)
native_protobuf_installed = False
else:
tmp = re.search(r'\d\.\d\.\d', out)
if tmp:
native_version = tmp.group(0)
native_protobuf_installed = True
else:
print('DEBUG: cannot parse protoc version string.')
print('DEBUG: out: ' + out)
native_protobuf_installed = False
PYTHON_PROTOBUF_NOT_INSTALLED = """
You have not installed python protobuf. Protobuf is needed to run caffe2. You
can install protobuf via pip or conda (if you are using anaconda python).
"""
NATIVE_PROTOBUF_NOT_INSTALLED = """
You have not installed the protoc binary. Protoc is needed to compile Caffe2
protobuf source files. Depending on the platform you are on, you can install
protobuf via:
(1) Mac: using homebrew and do brew install protobuf.
(2) Linux: use apt and do apt-get install libprotobuf-dev
(3) Windows: install from source, or from the releases here:
https://github.com/google/protobuf/releases/
"""
VERSION_MISMATCH = """
Your python protobuf is of version {py_ver} but your native protoc version is of
version {native_ver}. This will cause the installation to produce incompatible
protobuf files. This is bad in general - consider installing the same version.
""".format(py_ver=python_version, native_ver=native_version)
# Now, give actual recommendations
if not python_protobuf_installed:
print(PYTHON_PROTOBUF_NOT_INSTALLED)
if not native_protobuf_installed:
print(NATIVE_PROTOBUF_NOT_INSTALLED)
if python_protobuf_installed and native_protobuf_installed:
if python_version != native_version:
print(VERSION_MISMATCH.format(py_ver=python_version, native_ver=native_version))
else:
print('All looks good.')
| pytorch-master | scripts/diagnose_protobuf.py |
## @package get_python_cmake_flags
# Module scripts.get_python_cmake_flags
##############################################################################
# Use this script to find your preferred python installation.
##############################################################################
#
# You can use the following to build with your preferred version of python
# if your installation is not being properly detected by CMake.
#
# mkdir -p build && cd build
# cmake $(python ../scripts/get_python_cmake_flags.py) ..
# make
#
import sysconfig
import sys
flags = [
'-DPYTHON_EXECUTABLE:FILEPATH={}'.format(sys.executable),
'-DPYTHON_INCLUDE_DIR={}'.format(sysconfig.get_path('include')),
]
print(' '.join(flags), end='')
| pytorch-master | scripts/get_python_cmake_flags.py |
import unittest
import tempfile
from commitlist import CommitList
class TestCommitList(unittest.TestCase):
def test_create_new(self):
with tempfile.TemporaryDirectory() as tempdir:
commit_list_path = f'{tempdir}/commitlist.csv'
commit_list = CommitList.create_new(commit_list_path, 'v1.5.0', '6000dca5df')
self.assertEqual(len(commit_list.commits), 33)
self.assertEqual(commit_list.commits[0].commit_hash, '7335f079ab')
self.assertTrue(commit_list.commits[0].title.startswith('[pt][quant] qmul and qadd'))
self.assertEqual(commit_list.commits[-1].commit_hash, '6000dca5df')
self.assertTrue(commit_list.commits[-1].title.startswith('[nomnigraph] Copy device option when customize '))
def test_read_write(self):
with tempfile.TemporaryDirectory() as tempdir:
commit_list_path = f'{tempdir}/commitlist.csv'
initial = CommitList.create_new(commit_list_path, 'v1.5.0', '7543e7e558')
initial.write_to_disk()
expected = CommitList.from_existing(commit_list_path)
expected.commits[-2].category = 'foobar'
expected.write_to_disk()
commit_list = CommitList.from_existing(commit_list_path)
for commit, expected in zip(commit_list.commits, expected.commits):
self.assertEqual(commit, expected)
def test_update_to(self):
with tempfile.TemporaryDirectory() as tempdir:
commit_list_path = f'{tempdir}/commitlist.csv'
initial = CommitList.create_new(commit_list_path, 'v1.5.0', '7543e7e558')
initial.commits[-2].category = 'foobar'
self.assertEqual(len(initial.commits), 2143)
initial.write_to_disk()
commit_list = CommitList.from_existing(commit_list_path)
commit_list.update_to('5702a28b26')
self.assertEqual(len(commit_list.commits), 2143 + 4)
self.assertEqual(commit_list.commits[-5], initial.commits[-1])
if __name__ == '__main__':
unittest.main()
| pytorch-master | scripts/release_notes/test_release_notes.py |
from collections import namedtuple
from pathlib import Path
import locale
import subprocess
import re
import requests
import os
import json
categories = [
'Uncategorized',
'distributed',
'lazy',
'hub',
'mobile',
'jit',
'visualization',
'onnx',
'caffe2',
'quantization',
'amd',
'rocm',
'cuda',
'cudnn',
'benchmark',
'profiler',
'performance_as_product',
'package',
'dispatcher',
'releng',
'fx',
'code_coverage',
'vulkan',
'skip',
'composability',
'meta_frontend',
'nn_frontend',
'linalg_frontend',
'cpp_frontend',
'python_frontend',
'complex_frontend',
'vmap_frontend',
'autograd_frontend',
'build_frontend',
'memory_format_frontend',
'foreach_frontend',
'dataloader_frontend',
'sparse_frontend'
]
topics = [
'bc_breaking',
'deprecations',
'new_features',
'improvements',
'bug_fixes',
'performance',
'docs',
'devs',
'Untopiced',
"not user facing",
"security",
]
Features = namedtuple('Features', [
'title',
'body',
'pr_number',
'files_changed',
'labels',
'author',
'accepters'
])
def dict_to_features(dct):
return Features(
title=dct['title'],
body=dct['body'],
pr_number=dct['pr_number'],
files_changed=dct['files_changed'],
labels=dct['labels'],
author=dct['author'],
accepters=tuple(dct['accepters']))
def features_to_dict(features):
return dict(features._asdict())
def run(command):
"""Returns (return-code, stdout, stderr)"""
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
output, err = p.communicate()
rc = p.returncode
enc = locale.getpreferredencoding()
output = output.decode(enc)
err = err.decode(enc)
return rc, output.strip(), err.strip()
def commit_body(commit_hash):
cmd = f'git log -n 1 --pretty=format:%b {commit_hash}'
ret, out, err = run(cmd)
return out if ret == 0 else None
def commit_title(commit_hash):
cmd = f'git log -n 1 --pretty=format:%s {commit_hash}'
ret, out, err = run(cmd)
return out if ret == 0 else None
def commit_files_changed(commit_hash):
cmd = f'git diff-tree --no-commit-id --name-only -r {commit_hash}'
ret, out, err = run(cmd)
return out.split('\n') if ret == 0 else None
def parse_pr_number(body, commit_hash, title):
regex = r'Pull Request resolved: https://github.com/pytorch/pytorch/pull/([0-9]+)'
matches = re.findall(regex, body)
if len(matches) == 0:
if 'revert' not in title.lower() and 'updating submodules' not in title.lower():
print(f'[{commit_hash}: {title}] Could not parse PR number, ignoring PR')
return None
if len(matches) > 1:
print(f'[{commit_hash}: {title}] Got two PR numbers, using the first one')
return matches[0]
return matches[0]
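# For example (illustrative body text only), a commit body containing the line
# "Pull Request resolved: https://github.com/pytorch/pytorch/pull/12345"
# yields the string "12345"; bodies without such a line (e.g. submodule updates)
# yield None.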
def get_ghstack_token():
pattern = 'github_oauth = (.*)'
with open(Path('~/.ghstackrc').expanduser(), 'r+') as f:
config = f.read()
matches = re.findall(pattern, config)
if len(matches) == 0:
raise RuntimeError("Can't find a github oauth token")
return matches[0]
token = get_ghstack_token()
headers = {"Authorization": f"token {token}"}
def run_query(query):
request = requests.post('https://api.github.com/graphql', json={'query': query}, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
def github_data(pr_number):
query = """
{
repository(owner: "pytorch", name: "pytorch") {
pullRequest(number: %s ) {
author {
login
}
reviews(last: 5, states: APPROVED) {
nodes {
author {
login
}
}
}
labels(first: 10) {
edges {
node {
name
}
}
}
}
}
}
""" % pr_number
query = run_query(query)
edges = query['data']['repository']['pullRequest']['labels']['edges']
labels = [edge['node']['name'] for edge in edges]
author = query['data']['repository']['pullRequest']['author']['login']
nodes = query['data']['repository']['pullRequest']['reviews']['nodes']
# using set to dedup multiple accepts from same accepter
accepters = {node["author"]["login"] for node in nodes}
accepters = tuple(sorted(accepters))
return labels, author, accepters
def get_features(commit_hash):
title, body, files_changed = (
commit_title(commit_hash),
commit_body(commit_hash),
commit_files_changed(commit_hash))
pr_number = parse_pr_number(body, commit_hash, title)
labels = []
author = ""
accepters = tuple()
if pr_number is not None:
labels, author, accepters = github_data(pr_number)
result = Features(title, body, pr_number, files_changed, labels, author, accepters)
return result
_commit_data_cache = None
def get_commit_data_cache(path='results/data.json'):
global _commit_data_cache
if _commit_data_cache is None:
_commit_data_cache = _CommitDataCache(path)
return _commit_data_cache
class _CommitDataCache:
def __init__(self, path):
self.path = path
self.data = {}
if os.path.exists(path):
self.data = self.read_from_disk()
else:
os.makedirs(Path(path).parent, exist_ok=True)
def get(self, commit):
if commit not in self.data.keys():
# Fetch and cache the data
self.data[commit] = get_features(commit)
self.write_to_disk()
return self.data[commit]
def read_from_disk(self):
with open(self.path, 'r') as f:
data = json.load(f)
data = {commit: dict_to_features(dct)
for commit, dct in data.items()}
return data
def write_to_disk(self):
data = {commit: features._asdict() for commit, features in self.data.items()}
with open(self.path, 'w') as f:
json.dump(data, f)
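# Typical use: get_commit_data_cache().get("<commit hash>") returns a Features
# namedtuple for that commit and persists it to results/data.json, so repeated
# runs avoid re-querying git and the GitHub API.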
| pytorch-master | scripts/release_notes/common.py |
# Quick scipt to apply categorized items to the
# base commitlist . Useful if you are refactoring any code
# but want to keep the previous data on categories
import commitlist
import csv
category_csv = "results/category_data.csv"
commitlist_csv = "results/commitlist.csv"
with open(category_csv, "r") as category_data:
reader = csv.DictReader(category_data, commitlist.commit_fields)
rows = list(reader)
category_map = {row["commit_hash"]: row["category"] for row in rows}
with open(commitlist_csv, "r") as commitlist_data:
reader = csv.DictReader(commitlist_data, commitlist.commit_fields)
commitlist_rows = list(reader)
for row in commitlist_rows:
hash = row["commit_hash"]
if hash in category_map and category_map[hash] != "Uncategorized":
row["category"] = category_map[hash]
with open(commitlist_csv, "w") as commitlist_write:
writer = csv.DictWriter(commitlist_write, commitlist.commit_fields)
writer.writeheader()
writer.writerows(commitlist_rows)
| pytorch-master | scripts/release_notes/apply_categories.py |
import argparse
import os
import textwrap
from common import categories, topics, get_commit_data_cache
from commitlist import CommitList
class Categorizer:
def __init__(self, path, category='Uncategorized'):
self.cache = get_commit_data_cache()
self.commits = CommitList.from_existing(path)
# Special categories: 'Uncategorized'
# All other categories must be real
self.category = category
def categorize(self):
commits = self.commits.filter(category=self.category)
total_commits = len(self.commits.commits)
already_done = total_commits - len(commits)
i = 0
while i < len(commits):
cur_commit = commits[i]
next_commit = commits[i + 1] if i + 1 < len(commits) else None
jump_to = self.handle_commit(cur_commit, already_done + i + 1, total_commits, commits)
# Increment counter
if jump_to is not None:
i = jump_to
elif next_commit is None:
i = len(commits)
else:
i = commits.index(next_commit)
def features(self, commit):
return self.cache.get(commit.commit_hash)
def potential_reverts_of(self, commit, commits):
submodule_update_str = ['Update TensorPipe submodule',
'Updating submodules',
'Automated submodule update']
if any(a in commit.title for a in submodule_update_str):
return []
features = self.features(commit)
if 'Reverted' in features.labels:
reasons = {'GithubBot': "Reverted"}
else:
reasons = {}
index = commits.index(commit)
        # strip the trailing " (#35011)"-style PR suffix (last 10 characters)
        cleaned_title = commit.title[:-10]
        # NB: the index + 2 offset is sketchy
reasons.update({(index + 2 + delta): cand for delta, cand in enumerate(commits[index + 1:])
if cleaned_title in cand.title and
commit.commit_hash != cand.commit_hash})
return reasons
def handle_commit(self, commit, i, total, commits):
potential_reverts = self.potential_reverts_of(commit, commits)
if potential_reverts:
potential_reverts = f'!!!POTENTIAL REVERTS!!!: {potential_reverts}'
else:
potential_reverts = ""
features = self.features(commit)
breaking_alarm = ""
if 'module: bc-breaking' in features.labels:
breaking_alarm += "\n!!!!!! BC BREAKING !!!!!!"
if 'module: deprecation' in features.labels:
breaking_alarm += "\n!!!!!! DEPRECATION !!!!!!"
os.system('clear')
view = textwrap.dedent(f'''\
[{i}/{total}]
================================================================================
{features.title}
{potential_reverts} {breaking_alarm}
{features.body}
Files changed: {features.files_changed}
Labels: {features.labels}
Current category: {commit.category}
Select from: {', '.join(categories)}
''')
print(view)
cat_choice = None
while cat_choice is None:
value = input('category> ').strip()
if len(value) == 0:
cat_choice = commit.category
continue
choices = [cat for cat in categories
if cat.startswith(value)]
if len(choices) != 1:
print(f'Possible matches: {choices}, try again')
continue
cat_choice = choices[0]
print(f'\nSelected: {cat_choice}')
print(f'\nCurrent topic: {commit.topic}')
print(f'''Select from: {', '.join(topics)}''')
topic_choice = None
while topic_choice is None:
value = input('topic> ').strip()
if len(value) == 0:
topic_choice = commit.topic
continue
choices = [cat for cat in topics
if cat.startswith(value)]
if len(choices) != 1:
print(f'Possible matches: {choices}, try again')
continue
topic_choice = choices[0]
print(f'\nSelected: {topic_choice}')
self.update_commit(commit, cat_choice, topic_choice)
return None
def update_commit(self, commit, category, topic):
assert category in categories
assert topic in topics
commit.category = category
commit.topic = topic
        self.commits.write_result()
def main():
parser = argparse.ArgumentParser(description='Tool to help categorize commits')
parser.add_argument('--category', type=str, default='Uncategorized',
help='Which category to filter by. "Uncategorized", None, or a category name')
parser.add_argument('--file', help='The location of the commits CSV',
default='results/commitlist.csv')
args = parser.parse_args()
categorizer = Categorizer(args.file, args.category)
categorizer.categorize()
if __name__ == '__main__':
main()
| pytorch-master | scripts/release_notes/categorize.py |
import argparse
import torch
from os import path
import json
# Import all utils so that getattr below can find them
from torch.utils import bottleneck, checkpoint, model_zoo
all_submod_list = [
"",
"nn",
"nn.functional",
"nn.init",
"optim",
"autograd",
"cuda",
"sparse",
"distributions",
"fft",
"linalg",
"jit",
"distributed",
"futures",
"onnx",
"random",
"utils.bottleneck",
"utils.checkpoint",
"utils.data",
"utils.model_zoo",
]
def get_content(submod):
mod = torch
if submod:
submod = submod.split(".")
for name in submod:
mod = getattr(mod, name)
content = dir(mod)
return content
def namespace_filter(data):
out = set(d for d in data if d[0] != "_")
return out
def run(args, submod):
print(f"## Processing torch.{submod}")
prev_filename = f"prev_data_{submod}.json"
new_filename = f"new_data_{submod}.json"
if args.prev_version:
content = get_content(submod)
with open(prev_filename, "w") as f:
json.dump(content, f)
print("Data saved for previous version.")
elif args.new_version:
content = get_content(submod)
with open(new_filename, "w") as f:
json.dump(content, f)
print("Data saved for new version.")
else:
assert args.compare
if not path.exists(prev_filename):
raise RuntimeError("Previous version data not collected")
if not path.exists(new_filename):
raise RuntimeError("New version data not collected")
with open(prev_filename, "r") as f:
prev_content = set(json.load(f))
with open(new_filename, "r") as f:
new_content = set(json.load(f))
if not args.show_all:
prev_content = namespace_filter(prev_content)
new_content = namespace_filter(new_content)
if new_content == prev_content:
print("Nothing changed.")
print("")
else:
print("Things that were added:")
print(new_content - prev_content)
print("")
print("Things that were removed:")
print(prev_content - new_content)
print("")
def main():
parser = argparse.ArgumentParser(description='Tool to check namespace content changes')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--prev-version', action='store_true')
group.add_argument('--new-version', action='store_true')
group.add_argument('--compare', action='store_true')
group = parser.add_mutually_exclusive_group()
group.add_argument('--submod', default='', help='part of the submodule to check')
group.add_argument('--all-submod', action='store_true', help='collects data for all main submodules')
parser.add_argument('--show-all', action='store_true', help='show all the diff, not just public APIs')
args = parser.parse_args()
if args.all_submod:
submods = all_submod_list
else:
submods = [args.submod]
for mod in submods:
run(args, mod)
if __name__ == '__main__':
main()
| pytorch-master | scripts/release_notes/namespace_check.py |
import argparse
from common import run, topics, get_features
from collections import defaultdict
import os
from pathlib import Path
import csv
import pprint
from common import get_commit_data_cache, features_to_dict
import re
import dataclasses
from typing import List
"""
Example Usages
Create a new commitlist for consumption by categorize.py.
Said commitlist contains commits between v1.5.0 and f5bc91f851.
python commitlist.py --create_new tags/v1.5.0 f5bc91f851
Update the existing commitlist to commit bfcb687b9c.
python commitlist.py --update_to bfcb687b9c
"""
@dataclasses.dataclass(frozen=True)
class Commit:
commit_hash: str
category: str
topic: str
title: str
pr_link: str
author: str
# This is not a list so that it is easier to put in a spreadsheet
accepter_1: str
accepter_2: str
accepter_3: str
merge_into: str = None
def __repr__(self):
return f'Commit({self.commit_hash}, {self.category}, {self.topic}, {self.title})'
commit_fields = tuple(f.name for f in dataclasses.fields(Commit))
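# For reference, commit_fields (and therefore the CSV header written below) is:
# ('commit_hash', 'category', 'topic', 'title', 'pr_link', 'author',
#  'accepter_1', 'accepter_2', 'accepter_3', 'merge_into')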
class CommitList:
# NB: Private ctor. Use `from_existing` or `create_new`.
def __init__(self, path: str, commits: List[Commit]):
self.path = path
self.commits = commits
@staticmethod
def from_existing(path):
commits = CommitList.read_from_disk(path)
return CommitList(path, commits)
@staticmethod
def create_new(path, base_version, new_version):
if os.path.exists(path):
raise ValueError('Attempted to create a new commitlist but one exists already!')
commits = CommitList.get_commits_between(base_version, new_version)
return CommitList(path, commits)
@staticmethod
def read_from_disk(path) -> List[Commit]:
with open(path) as csvfile:
reader = csv.DictReader(csvfile)
rows = []
for row in reader:
if row.get("new_title", "") != "":
row["title"] = row["new_title"]
filtered_rows = {k: row.get(k, "") for k in commit_fields}
rows.append(Commit(**filtered_rows))
return rows
def write_result(self):
self.write_to_disk_static(self.path, self.commits)
@staticmethod
def write_to_disk_static(path, commit_list):
os.makedirs(Path(path).parent, exist_ok=True)
with open(path, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(commit_fields)
for commit in commit_list:
writer.writerow(dataclasses.astuple(commit))
def keywordInFile(file, keywords):
for key in keywords:
if key in file:
return True
return False
@staticmethod
def gen_commit(commit_hash):
feature_item = get_commit_data_cache().get(commit_hash)
features = features_to_dict(feature_item)
category, topic = CommitList.categorize(features)
a1, a2, a3 = (features["accepters"] + ("", "", ""))[:3]
if features["pr_number"] is not None:
pr_link = f"https://github.com/pytorch/pytorch/pull/{features['pr_number']}"
else:
pr_link = None
return Commit(commit_hash, category, topic, features["title"], pr_link, features["author"], a1, a2, a3)
@staticmethod
def categorize(features):
title = features['title']
labels = features['labels']
category = 'Uncategorized'
topic = 'Untopiced'
# We ask contributors to label their PR's appropriately
# when they're first landed.
# Check if the labels are there first.
already_categorized = already_topiced = False
for label in labels:
if label.startswith('release notes: '):
category = label.split('release notes: ', 1)[1]
already_categorized = True
if label.startswith('topic: '):
topic = label.split('topic: ', 1)[1]
already_topiced = True
if already_categorized and already_topiced:
return category, topic
# update this to check if each file starts with caffe2
if 'caffe2' in title:
return 'caffe2', topic
if '[codemod]' in title.lower():
return 'skip', topic
if 'Reverted' in labels:
return 'skip', topic
if 'bc_breaking' in labels:
topic = 'bc-breaking'
if 'module: deprecation' in labels:
topic = 'deprecation'
files_changed = features['files_changed']
for file in files_changed:
file_lowercase = file.lower()
if CommitList.keywordInFile(file, ['docker/', '.circleci', '.github', '.jenkins', '.azure_pipelines']):
category = 'releng'
break
# datapipe(s), torch/utils/data, test_{dataloader, datapipe}
if CommitList.keywordInFile(file, ['torch/utils/data', 'test_dataloader', 'test_datapipe']):
category = 'dataloader_frontend'
break
if CommitList.keywordInFile(file, ['torch/csrc/api', 'test/cpp/api']):
category = 'cpp_frontend'
break
if CommitList.keywordInFile(file, ['distributed', 'c10d']):
category = 'distributed'
break
if ('vulkan' in file_lowercase):
category = 'vulkan'
break
if ('Foreach' in file_lowercase):
category = 'foreach_frontend'
break
if 'onnx' in file_lowercase:
category = 'onnx'
break
if CommitList.keywordInFile(file, ['torch/fx', 'test_fx']):
category = 'fx'
break
if CommitList.keywordInFile(file, ['torch/ao', 'test/ao']):
category = 'ao'
break
# torch/quantization, test/quantization, aten/src/ATen/native/quantized, torch/nn/{quantized, quantizable}
if CommitList.keywordInFile(file, ['torch/quantization', 'test/quantization', 'aten/src/ATen/native/quantized', 'torch/nn/quantiz']):
category = 'quantization'
break
if CommitList.keywordInFile(file, ['torch/package', 'test/package']):
category = 'package'
break
if CommitList.keywordInFile(file, ['torch/csrc/jit/mobile', 'aten/src/ATen/native/metal', 'test/mobile', 'torch/backends/_nnapi/', 'test/test_nnapi.py']):
category = 'mobile'
break
if CommitList.keywordInFile(file, ['aten/src/ATen/native/LinearAlgebra.cpp', 'test/test_linalg.py', 'torch/linalg']):
category = 'linalg_frontend'
break
if CommitList.keywordInFile(file, ['torch/sparse', 'aten/src/ATen/native/sparse', 'torch/_masked/__init__.py']):
category = 'sparse_frontend'
break
if CommitList.keywordInFile(file, ['tools/autograd']):
category = 'autograd_frontend'
break
if CommitList.keywordInFile(file, ['test/test_nn.py', 'test/test_module.py', 'torch/nn/modules', 'torch/nn/functional.py']):
category = 'nn_frontend'
break
if CommitList.keywordInFile(file, ['torch/csrc/jit', 'torch/jit']):
category = 'jit'
break
else:
            # Below are some extra quick checks that aren't necessarily file-path related,
            # but they catch a decent number of extra commits.
if len(files_changed) > 0 and all([f_name.endswith('.cu') or f_name.endswith('.cuh') for f_name in files_changed]):
category = 'cuda'
elif '[PyTorch Edge]' in title:
category = 'mobile'
elif len(files_changed) == 1 and 'torch/testing/_internal/common_methods_invocations.py' in files_changed[0]:
# when this is the only file changed, it's almost always an OpInfo change.
category = 'python_frontend'
elif len(files_changed) == 1 and 'torch/_torch_docs.py' in files_changed[0]:
# individual torch_docs changes are usually for python ops
category = 'python_frontend'
return category, topic
@staticmethod
def get_commits_between(base_version, new_version):
cmd = f'git merge-base {base_version} {new_version}'
rc, merge_base, _ = run(cmd)
assert rc == 0
# Returns a list of something like
# b33e38ec47 Allow a higher-precision step type for Vec256::arange (#34555)
cmd = f'git log --reverse --oneline {merge_base}..{new_version}'
rc, commits, _ = run(cmd)
assert rc == 0
log_lines = commits.split('\n')
hashes, titles = zip(*[log_line.split(' ', 1) for log_line in log_lines])
return [CommitList.gen_commit(commit_hash) for commit_hash in hashes]
def filter(self, *, category=None, topic=None):
commits = self.commits
if category is not None:
commits = [commit for commit in commits if commit.category == category]
if topic is not None:
commits = [commit for commit in commits if commit.topic == topic]
return commits
def update_to(self, new_version):
last_hash = self.commits[-1].commit_hash
new_commits = CommitList.get_commits_between(last_hash, new_version)
self.commits += new_commits
def stat(self):
counts = defaultdict(lambda: defaultdict(int))
for commit in self.commits:
counts[commit.category][commit.topic] += 1
return counts
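# A minimal, self-contained sketch of the label fast-path in CommitList.categorize
# (the values below are made up purely for illustration):
_example_features = {
    "title": "Fix clamp on CUDA (#12345)",
    "labels": ["release notes: nn", "topic: bug fixes"],
    "files_changed": [],
}
# Both a "release notes: ..." and a "topic: ..." label are present, so the
# file-path heuristics above are skipped entirely.
assert CommitList.categorize(_example_features) == ("nn", "bug fixes")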
def create_new(path, base_version, new_version):
commits = CommitList.create_new(path, base_version, new_version)
commits.write_result()
def update_existing(path, new_version):
commits = CommitList.from_existing(path)
commits.update_to(new_version)
commits.write_result()
def rerun_with_new_filters(path):
current_commits = CommitList.from_existing(path)
for i in range(len(current_commits.commits)):
c = current_commits.commits[i]
if 'Uncategorized' in str(c):
feature_item = get_commit_data_cache().get(c.commit_hash)
features = features_to_dict(feature_item)
category, topic = CommitList.categorize(features)
            current_commits.commits[i] = dataclasses.replace(c, category=category, topic=topic)
current_commits.write_result()
def get_hash_or_pr_url(commit: Commit):
# cdc = get_commit_data_cache()
pr_link = commit.pr_link
if pr_link is None:
return commit.commit_hash
else:
regex = r'https://github.com/pytorch/pytorch/pull/([0-9]+)'
matches = re.findall(regex, pr_link)
if len(matches) == 0:
return commit.commit_hash
return f'[#{matches[0]}]({pr_link})'
def to_markdown(commit_list: CommitList, category):
def cleanup_title(commit):
match = re.match(r'(.*) \(#\d+\)', commit.title)
if match is None:
return commit.title
return match.group(1)
merge_mapping = defaultdict(list)
for commit in commit_list.commits:
if commit.merge_into:
merge_mapping[commit.merge_into].append(commit)
cdc = get_commit_data_cache()
lines = [f'\n## {category}\n']
for topic in topics:
lines.append(f'### {topic}\n')
commits = commit_list.filter(category=category, topic=topic)
for commit in commits:
if commit.merge_into:
continue
all_related_commits = merge_mapping[commit.commit_hash] + [commit]
commit_list_md = ", ".join(get_hash_or_pr_url(c) for c in all_related_commits)
result = f'- {cleanup_title(commit)} ({commit_list_md})\n'
lines.append(result)
return lines
def get_markdown_header(category):
header = f"""
# Release Notes worksheet {category}
The main goal of this process is to rephrase all the commit messages below to make them clear and easy to read by the end user. Follow these instructions to do so:
* **Please clean up and format commit titles to be readable by the general pytorch user.** [Detailed instructions here](https://fb.quip.com/OCRoAbEvrRD9#HdaACARZZvo)
* Please sort commits into the following categories (you should not rename the categories!). I tried to pre-sort these to ease your work; feel free to move commits around if the current categorization is not good.
* Please drop any commits that are not user-facing.
* If anything is from another domain, leave it in the UNTOPICED section at the end and I'll come and take care of it.
The categories below are as follows:
* BC breaking: All commits that are BC-breaking. These are the most important commits. If any pre-sorted commit is actually BC-breaking, do move it to this section. Each commit should contain a paragraph explaining the rationale behind the change as well as an example of how to update user code (guidelines here: https://quip.com/OCRoAbEvrRD9)
* Deprecations: All commits introducing deprecation. Each commit should include a small example explaining what should be done to update user code.
* new_features: All commits introducing a new feature (new functions, new submodule, new supported platform etc)
* improvements: All commits providing improvements to existing feature should be here (new backend for a function, new argument, better numerical stability)
* bug fixes: All commits that fix bugs and behaviors that do not match the documentation
* performance: All commits that are added mainly for performance (we separate this from improvements above to make it easier for users to look for it)
* documentation: All commits that add/update documentation
* Developers: All commits that are not end-user facing but still impact people that compile from source, develop into pytorch, extend pytorch, etc
"""
return [header, ]
def main():
parser = argparse.ArgumentParser(description='Tool to create a commit list')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--create_new', nargs=2)
group.add_argument('--update_to')
# I found this flag useful when experimenting with adding new auto-categorizing filters.
# After running commitlist.py the first time, if you add any new filters in this file,
# re-running with "rerun_with_new_filters" will update the existing commitlist.csv file,
# but only affect the rows that were previously marked as "Uncategorized"
group.add_argument('--rerun_with_new_filters', action='store_true')
group.add_argument('--stat', action='store_true')
group.add_argument('--export_markdown', action='store_true')
group.add_argument('--export_csv_categories', action='store_true')
parser.add_argument('--path', default='results/commitlist.csv')
args = parser.parse_args()
if args.create_new:
create_new(args.path, args.create_new[0], args.create_new[1])
return
if args.update_to:
update_existing(args.path, args.update_to)
return
if args.rerun_with_new_filters:
rerun_with_new_filters(args.path)
return
if args.stat:
commits = CommitList.from_existing(args.path)
stats = commits.stat()
pprint.pprint(stats)
return
if args.export_csv_categories:
commits = CommitList.from_existing(args.path)
categories = list(commits.stat().keys())
for category in categories:
print(f"Exporting {category}...")
filename = f'results/export/result_{category}.csv'
CommitList.write_to_disk_static(filename, commits.filter(category=category))
return
if args.export_markdown:
commits = CommitList.from_existing(args.path)
categories = list(commits.stat().keys())
for category in categories:
print(f"Exporting {category}...")
lines = get_markdown_header(category)
lines += to_markdown(commits, category)
filename = f'results/export/result_{category}.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as f:
f.writelines(lines)
return
raise AssertionError()
if __name__ == '__main__':
main()
| pytorch-master | scripts/release_notes/commitlist.py |
#! /usr/bin/env python3
import onnx.backend
import argparse
import caffe2.python.workspace as c2_workspace
import glob
import json
import numpy as np
import onnx
import caffe2.python.onnx.frontend
import caffe2.python.onnx.backend
import os
import shutil
import tarfile
import tempfile
import boto3
from six.moves.urllib.request import urlretrieve
from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory
from caffe2.proto import caffe2_pb2
from onnx import numpy_helper
"""A script converting Caffe2 models to ONNX, and updating ONNX model zoos.
Arguments:
-v, verbose
--local-dir, where we store the ONNX and Caffe2 models
--no-cache, ignore existing models in local-dir
--clean-test-data, delete all the existing test data when updating ONNX model zoo
    --add-test-data, number of new sets of test data to generate for each ONNX model
    --only-local, run locally (for testing purposes)
Examples:
# store the data in /home/username/zoo-dir, delete existing test data, ignore local cache,
# and generate 3 sets of new test data
    python update-models-from-caffe2.py --local-dir /home/username/zoo-dir --clean-test-data --no-cache --add-test-data 3
"""
# TODO: Add GPU support
def upload_onnx_model(model_name, zoo_dir, backup=False, only_local=False):
if only_local:
print('No uploading in local only mode.')
return
model_dir = os.path.join(zoo_dir, model_name)
suffix = '-backup' if backup else ''
if backup:
print('Backing up the previous version of ONNX model {}...'.format(model_name))
rel_file_name = '{}{}.tar.gz'.format(model_name, suffix)
abs_file_name = os.path.join(zoo_dir, rel_file_name)
print('Compressing {} model to {}'.format(model_name, abs_file_name))
with tarfile.open(abs_file_name, 'w:gz') as f:
f.add(model_dir, arcname=model_name)
file_size = os.stat(abs_file_name).st_size
print('Uploading {} ({} MB) to s3 cloud...'.format(abs_file_name, float(file_size) / 1024 / 1024))
client = boto3.client('s3', 'us-east-1')
transfer = boto3.s3.transfer.S3Transfer(client)
transfer.upload_file(abs_file_name, 'download.onnx', 'models/latest/{}'.format(rel_file_name),
extra_args={'ACL': 'public-read'})
print('Successfully uploaded {} to s3!'.format(rel_file_name))
def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False):
model_dir = os.path.join(zoo_dir, model_name)
if os.path.exists(model_dir):
if use_cache:
upload_onnx_model(model_name, zoo_dir, backup=True, only_local=only_local)
return
else:
shutil.rmtree(model_dir)
url = 'https://s3.amazonaws.com/download.onnx/models/latest/{}.tar.gz'.format(model_name)
download_file = tempfile.NamedTemporaryFile(delete=False)
try:
download_file.close()
print('Downloading ONNX model {} from {} and save in {} ...\n'.format(
model_name, url, download_file.name))
urlretrieve(url, download_file.name)
with tarfile.open(download_file.name) as t:
print('Extracting ONNX model {} to {} ...\n'.format(model_name, zoo_dir))
t.extractall(zoo_dir)
except Exception as e:
print('Failed to download/backup data for ONNX model {}: {}'.format(model_name, e))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
finally:
os.remove(download_file.name)
if not only_local:
upload_onnx_model(model_name, zoo_dir, backup=True, only_local=only_local)
def download_caffe2_model(model_name, zoo_dir, use_cache=True):
model_dir = os.path.join(zoo_dir, model_name)
if os.path.exists(model_dir):
if use_cache:
return
else:
shutil.rmtree(model_dir)
os.makedirs(model_dir)
for f in ['predict_net.pb', 'init_net.pb', 'value_info.json']:
url = getURLFromName(model_name, f)
dest = os.path.join(model_dir, f)
try:
try:
downloadFromURLToFile(url, dest,
show_progress=False)
except TypeError:
# show_progress not supported prior to
# Caffe2 78c014e752a374d905ecfb465d44fa16e02a28f1
# (Sep 17, 2017)
downloadFromURLToFile(url, dest)
except Exception as e:
print("Abort: {reason}".format(reason=e))
print("Cleaning up...")
deleteDirectory(model_dir)
raise
def caffe2_to_onnx(caffe2_model_name, caffe2_model_dir):
caffe2_init_proto = caffe2_pb2.NetDef()
caffe2_predict_proto = caffe2_pb2.NetDef()
with open(os.path.join(caffe2_model_dir, 'init_net.pb'), 'rb') as f:
caffe2_init_proto.ParseFromString(f.read())
caffe2_init_proto.name = '{}_init'.format(caffe2_model_name)
with open(os.path.join(caffe2_model_dir, 'predict_net.pb'), 'rb') as f:
caffe2_predict_proto.ParseFromString(f.read())
caffe2_predict_proto.name = caffe2_model_name
with open(os.path.join(caffe2_model_dir, 'value_info.json'), 'rb') as f:
value_info = json.loads(f.read())
print('Converting Caffe2 model {} in {} to ONNX format'.format(caffe2_model_name, caffe2_model_dir))
onnx_model = caffe2.python.onnx.frontend.caffe2_net_to_onnx_model(
init_net=caffe2_init_proto,
predict_net=caffe2_predict_proto,
value_info=value_info
)
return onnx_model, caffe2_init_proto, caffe2_predict_proto
def tensortype_to_ndarray(tensor_type):
shape = []
for dim in tensor_type.shape.dim:
shape.append(dim.dim_value)
if tensor_type.elem_type == onnx.TensorProto.FLOAT:
type = np.float32
    elif tensor_type.elem_type == onnx.TensorProto.INT32:
        type = np.int32
    else:
        raise ValueError('Unsupported ONNX tensor element type: {}'.format(tensor_type.elem_type))
array = np.random.rand(*shape).astype(type)
return array
def generate_test_input_data(onnx_model, scale):
real_inputs_names = list(set([input.name for input in onnx_model.graph.input]) - set([init.name for init in onnx_model.graph.initializer]))
real_inputs = []
for name in real_inputs_names:
for input in onnx_model.graph.input:
if name == input.name:
real_inputs.append(input)
test_inputs = []
for input in real_inputs:
ndarray = tensortype_to_ndarray(input.type.tensor_type)
test_inputs.append((input.name, ndarray * scale))
return test_inputs
def generate_test_output_data(caffe2_init_net, caffe2_predict_net, inputs):
p = c2_workspace.Predictor(caffe2_init_net, caffe2_predict_net)
inputs_map = {input[0]:input[1] for input in inputs}
output = p.run(inputs_map)
c2_workspace.ResetWorkspace()
return output
def onnx_verify(onnx_model, inputs, ref_outputs):
prepared = caffe2.python.onnx.backend.prepare(onnx_model)
onnx_inputs = []
for input in inputs:
if isinstance(input, tuple):
onnx_inputs.append(input[1])
else:
onnx_inputs.append(input)
onnx_outputs = prepared.run(inputs=onnx_inputs)
np.testing.assert_almost_equal(onnx_outputs, ref_outputs, decimal=3)
model_mapping = {
'bvlc_alexnet': 'bvlc_alexnet',
'bvlc_googlenet': 'bvlc_googlenet',
'bvlc_reference_caffenet': 'bvlc_reference_caffenet',
'bvlc_reference_rcnn_ilsvrc13': 'bvlc_reference_rcnn_ilsvrc13',
'densenet121': 'densenet121',
#'finetune_flickr_style': 'finetune_flickr_style',
'inception_v1': 'inception_v1',
'inception_v2': 'inception_v2',
'resnet50': 'resnet50',
'shufflenet': 'shufflenet',
'squeezenet': 'squeezenet_old',
#'vgg16': 'vgg16',
'vgg19': 'vgg19',
'zfnet512': 'zfnet512',
}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Update the ONNX models.')
parser.add_argument('-v', action="store_true", default=False, help="verbose")
parser.add_argument("--local-dir", type=str, default=os.path.expanduser('~'),
help="local dir to store Caffe2 and ONNX models")
parser.add_argument("--no-cache", action="store_true", default=False,
help="whether use local ONNX models")
parser.add_argument('--clean-test-data', action="store_true", default=False,
help="remove the old test data")
parser.add_argument('--add-test-data', type=int, default=0,
help="add new test data")
parser.add_argument('--only-local', action="store_true", default=False,
help="no upload including backup")
args = parser.parse_args()
delete_test_data = args.clean_test_data
add_test_data = args.add_test_data
use_cache = not args.no_cache
only_local = args.only_local
root_dir = args.local_dir
caffe2_zoo_dir = os.path.join(root_dir, ".caffe2", "models")
onnx_zoo_dir = os.path.join(root_dir, ".onnx", "models")
for onnx_model_name in model_mapping:
c2_model_name = model_mapping[onnx_model_name]
print('####### Processing ONNX model {} ({} in Caffe2) #######'.format(onnx_model_name, c2_model_name))
download_caffe2_model(c2_model_name, caffe2_zoo_dir, use_cache=use_cache)
download_onnx_model(onnx_model_name, onnx_zoo_dir, use_cache=use_cache, only_local=only_local)
onnx_model_dir = os.path.join(onnx_zoo_dir, onnx_model_name)
if delete_test_data:
print('Deleting all the existing test data...')
# NB: For now, we don't delete the npz files.
#for f in glob.glob(os.path.join(onnx_model_dir, '*.npz')):
# os.remove(f)
for f in glob.glob(os.path.join(onnx_model_dir, 'test_data_set*')):
shutil.rmtree(f)
onnx_model, c2_init_net, c2_predict_net = caffe2_to_onnx(c2_model_name, os.path.join(caffe2_zoo_dir, c2_model_name))
        print('Deleting old ONNX {} model...'.format(onnx_model_name))
        for f in glob.glob(os.path.join(onnx_model_dir, 'model*')):
os.remove(f)
print('Serializing generated ONNX {} model ...'.format(onnx_model_name))
with open(os.path.join(onnx_model_dir, 'model.onnx'), 'wb') as file:
file.write(onnx_model.SerializeToString())
print('Verifying model {} with ONNX model checker...'.format(onnx_model_name))
onnx.checker.check_model(onnx_model)
total_existing_data_set = 0
print('Verifying model {} with existing test data...'.format(onnx_model_name))
for f in glob.glob(os.path.join(onnx_model_dir, '*.npz')):
test_data = np.load(f, encoding='bytes')
inputs = list(test_data['inputs'])
ref_outputs = list(test_data['outputs'])
onnx_verify(onnx_model, inputs, ref_outputs)
total_existing_data_set += 1
for f in glob.glob(os.path.join(onnx_model_dir, 'test_data_set*')):
inputs = []
inputs_num = len(glob.glob(os.path.join(f, 'input_*.pb')))
for i in range(inputs_num):
tensor = onnx.TensorProto()
with open(os.path.join(f, 'input_{}.pb'.format(i)), 'rb') as pf:
tensor.ParseFromString(pf.read())
inputs.append(numpy_helper.to_array(tensor))
ref_outputs = []
ref_outputs_num = len(glob.glob(os.path.join(f, 'output_*.pb')))
for i in range(ref_outputs_num):
tensor = onnx.TensorProto()
with open(os.path.join(f, 'output_{}.pb'.format(i)), 'rb') as pf:
tensor.ParseFromString(pf.read())
ref_outputs.append(numpy_helper.to_array(tensor))
onnx_verify(onnx_model, inputs, ref_outputs)
total_existing_data_set += 1
starting_index = 0
while os.path.exists(os.path.join(onnx_model_dir, 'test_data_set_{}'.format(starting_index))):
starting_index += 1
if total_existing_data_set == 0 and add_test_data == 0:
add_test_data = 3
total_existing_data_set = 3
print('Generating {} sets of new test data...'.format(add_test_data))
for i in range(starting_index, add_test_data + starting_index):
data_dir = os.path.join(onnx_model_dir, 'test_data_set_{}'.format(i))
os.makedirs(data_dir)
inputs = generate_test_input_data(onnx_model, 255)
ref_outputs = generate_test_output_data(c2_init_net, c2_predict_net, inputs)
onnx_verify(onnx_model, inputs, ref_outputs)
for index, input in enumerate(inputs):
tensor = numpy_helper.from_array(input[1])
with open(os.path.join(data_dir, 'input_{}.pb'.format(index)), 'wb') as file:
file.write(tensor.SerializeToString())
for index, output in enumerate(ref_outputs):
tensor = numpy_helper.from_array(output)
with open(os.path.join(data_dir, 'output_{}.pb'.format(index)), 'wb') as file:
file.write(tensor.SerializeToString())
del onnx_model
del c2_init_net
del c2_predict_net
upload_onnx_model(onnx_model_name, onnx_zoo_dir, backup=False, only_local=only_local)
print('\n\n')
| pytorch-master | scripts/model_zoo/update-models-from-caffe2.py |
#! /usr/bin/env python3
import os
import subprocess
import sys
import tarfile
import tempfile
from six.moves.urllib.request import urlretrieve
from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory
class SomeClass:
# largely copied from
# https://github.com/onnx/onnx-caffe2/blob/master/tests/caffe2_ref_test.py
def _download(self, model):
model_dir = self._caffe2_model_dir(model)
assert not os.path.exists(model_dir)
os.makedirs(model_dir)
for f in ['predict_net.pb', 'init_net.pb', 'value_info.json']:
url = getURLFromName(model, f)
dest = os.path.join(model_dir, f)
try:
try:
downloadFromURLToFile(url, dest,
show_progress=False)
except TypeError:
# show_progress not supported prior to
# Caffe2 78c014e752a374d905ecfb465d44fa16e02a28f1
# (Sep 17, 2017)
downloadFromURLToFile(url, dest)
except Exception as e:
print("Abort: {reason}".format(reason=e))
print("Cleaning up...")
deleteDirectory(model_dir)
exit(1)
def _caffe2_model_dir(self, model):
caffe2_home = os.path.expanduser('~/.caffe2')
models_dir = os.path.join(caffe2_home, 'models')
return os.path.join(models_dir, model)
def _onnx_model_dir(self, model):
onnx_home = os.path.expanduser('~/.onnx')
models_dir = os.path.join(onnx_home, 'models')
model_dir = os.path.join(models_dir, model)
return model_dir, os.path.dirname(model_dir)
# largely copied from
# https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py
def _prepare_model_data(self, model):
model_dir, models_dir = self._onnx_model_dir(model)
if os.path.exists(model_dir):
return
os.makedirs(model_dir)
url = 'https://s3.amazonaws.com/download.onnx/models/{}.tar.gz'.format(model)
# On Windows, NamedTemporaryFile cannot be opened for a
# second time
download_file = tempfile.NamedTemporaryFile(delete=False)
try:
download_file.close()
print('Start downloading model {} from {}'.format(model, url))
urlretrieve(url, download_file.name)
print('Done')
with tarfile.open(download_file.name) as t:
t.extractall(models_dir)
except Exception as e:
print('Failed to prepare data for model {}: {}'.format(model, e))
raise
finally:
os.remove(download_file.name)
models = [
'bvlc_alexnet',
'densenet121',
'inception_v1',
'inception_v2',
'resnet50',
# TODO currently onnx can't translate squeezenet :(
# 'squeezenet',
'vgg16',
# TODO currently vgg19 doesn't work in the CI environment,
# possibly due to OOM
# 'vgg19'
]
def download_models():
sc = SomeClass()
for model in models:
print('update-caffe2-models.py: downloading', model)
caffe2_model_dir = sc._caffe2_model_dir(model)
onnx_model_dir, onnx_models_dir = sc._onnx_model_dir(model)
if not os.path.exists(caffe2_model_dir):
sc._download(model)
if not os.path.exists(onnx_model_dir):
sc._prepare_model_data(model)
def generate_models():
sc = SomeClass()
for model in models:
print('update-caffe2-models.py: generating', model)
caffe2_model_dir = sc._caffe2_model_dir(model)
onnx_model_dir, onnx_models_dir = sc._onnx_model_dir(model)
subprocess.check_call(['echo', model])
with open(os.path.join(caffe2_model_dir, 'value_info.json'), 'r') as f:
value_info = f.read()
subprocess.check_call([
'convert-caffe2-to-onnx',
'--caffe2-net-name', model,
'--caffe2-init-net', os.path.join(caffe2_model_dir, 'init_net.pb'),
'--value-info', value_info,
'-o', os.path.join(onnx_model_dir, 'model.pb'),
os.path.join(caffe2_model_dir, 'predict_net.pb')
])
subprocess.check_call([
'tar',
'-czf',
model + '.tar.gz',
model
], cwd=onnx_models_dir)
def upload_models():
sc = SomeClass()
for model in models:
print('update-caffe2-models.py: uploading', model)
onnx_model_dir, onnx_models_dir = sc._onnx_model_dir(model)
subprocess.check_call([
'aws',
's3',
'cp',
model + '.tar.gz',
"s3://download.onnx/models/{}.tar.gz".format(model),
'--acl', 'public-read'
], cwd=onnx_models_dir)
def cleanup():
sc = SomeClass()
for model in models:
onnx_model_dir, onnx_models_dir = sc._onnx_model_dir(model)
os.remove(os.path.join(os.path.dirname(onnx_model_dir), model + '.tar.gz'))
if __name__ == '__main__':
try:
subprocess.check_call(['aws', 'sts', 'get-caller-identity'])
    except Exception:
print('update-caffe2-models.py: please run `aws configure` manually to set up credentials')
sys.exit(1)
if sys.argv[1] == 'download':
download_models()
    elif sys.argv[1] == 'generate':
generate_models()
elif sys.argv[1] == 'upload':
upload_models()
elif sys.argv[1] == 'cleanup':
cleanup()
| pytorch-master | scripts/model_zoo/update-caffe2-models.py |
import argparse
import functools
import traceback
from torch.utils.jit.log_extract import extract_ir, load_graph_and_inputs, run_baseline_no_fusion, run_nnc, run_nvfuser
from typing import List, Tuple, Callable, Optional
'''
Usage:
1. Run your script and pipe into a log file
PYTORCH_JIT_LOG_LEVEL=">>graph_fuser" python3 my_test.py &> log.txt
2. Run log_extract:
log_extract.py log.txt --nvfuser --nnc-dynamic --nnc-static
You can also output the list of extracted IR:
log_extract.py log.txt --output
Passing in --graphs 0 2 will only run graphs 0 and 2
'''
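# Programmatic equivalent of the CLI usage above (a minimal sketch; "log.txt" is a placeholder path):
#   graphs = extract_ir("log.txt")
#   test_runners(graphs, [("NNC Static", functools.partial(run_nnc, dynamic=False))], graph_set=None)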
def test_runners(graphs: List[str], runners: List[Tuple[str, Callable]], graph_set: Optional[List[int]]):
for i, ir in enumerate(graphs):
_, inputs = load_graph_and_inputs(ir)
if graph_set and i not in graph_set:
continue
print(f"Running Graph {i}")
prev_result = None
prev_runner_name = None
for runner in runners:
runner_name, runner_fn = runner
try:
result = runner_fn(ir, inputs)
if prev_result:
improvement = (prev_result / result - 1) * 100
print(f"{runner_name} : {result:.6f} ms improvement over {prev_runner_name}: improvement: {improvement:.2f}%")
else:
print(f"{runner_name} : {result:.6f} ms")
prev_result = result
prev_runner_name = runner_name
except RuntimeError:
print(f" Graph {i} failed for {runner_name} :", traceback.format_exc())
def run():
parser = argparse.ArgumentParser(
description="Extracts torchscript IR from log files and, optionally, benchmarks it or outputs the IR"
)
parser.add_argument("filename", help="Filename of log file")
parser.add_argument("--nvfuser", dest="nvfuser", action="store_true", help="benchmark nvfuser")
parser.add_argument("--no-nvfuser", dest="nvfuser", action="store_false", help="DON'T benchmark nvfuser")
parser.set_defaults(nvfuser=False)
parser.add_argument("--nnc-static", dest="nnc_static", action="store_true", help="benchmark nnc static")
parser.add_argument("--no-nnc-static", dest="nnc_static", action="store_false", help="DON'T benchmark nnc static")
parser.set_defaults(nnc_static=False)
parser.add_argument("--nnc-dynamic", dest="nnc_dynamic", action="store_true", help="nnc with dynamic shapes")
parser.add_argument(
"--no-nnc-dynamic",
dest="nnc_dynamic",
action="store_false",
help="DONT't benchmark nnc with dynamic shapes")
parser.set_defaults(nnc_dynamic=False)
parser.add_argument("--baseline", dest="baseline", action="store_true", help="benchmark baseline")
parser.add_argument("--no-baseline", dest="baseline", action="store_false", help="DON'T benchmark baseline")
parser.set_defaults(baseline=False)
parser.add_argument("--output", dest="output", action="store_true", help="Output graph IR")
parser.add_argument("--no-output", dest="output", action="store_false", help="DON'T output graph IR")
parser.set_defaults(output=False)
parser.add_argument('--graphs', nargs="+", type=int, help="Run only specified graph indices")
args = parser.parse_args()
graphs = extract_ir(args.filename)
graph_set = args.graphs
graph_set = graph_set if graph_set else None
options = []
if args.baseline:
options.append(("Baseline no fusion", run_baseline_no_fusion))
if args.nnc_dynamic:
options.append(("NNC Dynamic", functools.partial(run_nnc, dynamic=True)))
if args.nnc_static:
options.append(("NNC Static", functools.partial(run_nnc, dynamic=False)))
if args.nvfuser:
options.append(("NVFuser", run_nvfuser))
test_runners(graphs, options, graph_set)
if args.output:
quoted = []
for i, ir in enumerate(graphs):
if graph_set and i not in graph_set:
continue
quoted.append("\"\"\"" + ir + "\"\"\"")
print("[" + ", ".join(quoted) + "]")
if __name__ == "__main__":
run()
| pytorch-master | scripts/jit/log_extract.py |
#!/usr/bin/env python3
"""
This script finds the user/pr creator responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'. If there exists no PR associated with the commit or the PR is properly labeled,
this script is a no-op.
Note: we ping the user only, not the reviewers, as the reviewers can sometimes be external to pytorch
with no labeling responsibility, so we don't want to bother them.
This script is based on: https://github.com/pytorch/vision/blob/main/.github/process_commit.py
"""
import sys
from typing import Any, Set, Tuple, List
import re
import os
import json
import requests
# For a PR to be properly labeled it should have release notes label and one topic label
PULL_REQUEST_EXP = "Pull Request resolved:.*pull/(.*)"
PRIMARY_LABEL_FILTER = "release notes:"
SECONDARY_LABELS = {
"topic: bc_breaking",
"topic: deprecation",
"topic: new feature",
"topic: improvements",
"topic: bug fixes",
"topic: performance",
"topic: documentation",
"topic: developer feature",
"topic: not user facing",
}
# This secondary does not require a primary
ALLOWED_ONLY_SECONDARY = {"topic: not user facing"}
PYTORCH_REPO = "https://api.github.com/repos/pytorch/pytorch"
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
REQUEST_HEADERS = {'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {GITHUB_TOKEN}'}
def query_pytorch(cmd: str) -> Any:
response = requests.get(f"{PYTORCH_REPO}/{cmd}", headers=REQUEST_HEADERS)
return response.json()
def get_pr_number(commit_hash: str) -> Any:
data = query_pytorch(f"commits/{commit_hash}")
if not data or (not data["commit"]["message"]):
return None
message = data["commit"]["message"]
p = re.compile(PULL_REQUEST_EXP)
result = p.search(message)
if not result:
return None
return result.group(1)
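# A quick sanity sketch of the extraction above (the PR number is made up for illustration):
_example_match = re.search(PULL_REQUEST_EXP, "Pull Request resolved: https://github.com/pytorch/pytorch/pull/12345")
assert _example_match is not None and _example_match.group(1) == "12345"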
def get_pr_author_and_labels(pr_number: int) -> Tuple[str, Set[str]]:
# See https://docs.github.com/en/rest/reference/pulls#get-a-pull-request
data = query_pytorch(f"pulls/{pr_number}")
user = data["user"]["login"]
labels = {label["name"] for label in data["labels"]}
return user, labels
def get_repo_labels() -> List[str]:
collected_labels: List[str] = list()
for page in range(0, 10):
response = query_pytorch(f"labels?per_page=100&page={page}")
page_labels = list(map(lambda x: str(x["name"]), response))
if not page_labels:
break
collected_labels += page_labels
return collected_labels
def post_pytorch_comment(pr_number: int, merger: str) -> Any:
message = {'body' : f"Hey @{merger}." + """
You've committed this PR, but it does not have both a 'release notes: ...' and 'topic: ...' label. \
Please add one of each to the PR. The 'release notes: ...' label should represent the part of \
PyTorch that this PR changes (fx, autograd, distributed, etc) and the 'topic: ...' label should \
represent the kind of PR it is (not user facing, new feature, bug fix, perf improvement, etc). \
The list of valid labels can be found [here](https://github.com/pytorch/pytorch/labels?q=release+notes) \
for the 'release notes: ...' and [here](https://github.com/pytorch/pytorch/labels?q=topic) for the \
'topic: ...'.
For changes that are 'topic: not user facing' there is no need for a release notes label."""}
response = requests.post(
f"{PYTORCH_REPO}/issues/{pr_number}/comments",
json.dumps(message),
headers=REQUEST_HEADERS)
return response.json()
if __name__ == "__main__":
commit_hash = sys.argv[1]
pr_number = get_pr_number(commit_hash)
if not pr_number:
sys.exit(0)
user, labels = get_pr_author_and_labels(pr_number)
repo_labels = get_repo_labels()
primary_labels = set(filter(lambda x: x.startswith(PRIMARY_LABEL_FILTER), repo_labels))
has_both_labels = bool(primary_labels.intersection(labels) and SECONDARY_LABELS.intersection(labels))
is_properly_labeled = has_both_labels or bool(ALLOWED_ONLY_SECONDARY.intersection(labels))
if not is_properly_labeled:
post_pytorch_comment(pr_number, user)
| pytorch-master | .github/scripts/process_commit.py |
from unittest import TestCase, mock, main
from test_trymerge import mocked_gh_graphql
from trymerge import GitHubPR
from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
from typing import Any
from tryrebase import rebase_onto
class TestRebase(TestCase):
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
@mock.patch('gitutils.GitRepo._run_git')
@mock.patch('tryrebase.gh_post_comment')
def test_rebase(self, mocked_post_comment: Any, mocked_run_git: Any, mocked_gql: Any) -> None:
"Tests rebase successfully"
pr = GitHubPR("pytorch", "pytorch", 31093)
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
rebase_onto(pr, repo, 'master')
calls = [mock.call('fetch', 'origin', 'pull/31093/head:pull/31093/head'),
mock.call('rebase', 'refs/remotes/origin/master', 'pull/31093/head'),
mock.call('push', '-f', 'https://github.com/mingxiaoh/pytorch.git', 'pull/31093/head:master')]
mocked_run_git.assert_has_calls(calls)
self.assertTrue(
"Successfully rebased `master` onto `refs/remotes/origin/master`" in mocked_post_comment.call_args[0][3])
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
@mock.patch('gitutils.GitRepo._run_git')
@mock.patch('tryrebase.gh_post_comment')
def test_rebase_to_stable(self, mocked_post_comment: Any, mocked_run_git: Any, mocked_gql: Any) -> None:
"Tests rebase to viable/strict successfully"
pr = GitHubPR("pytorch", "pytorch", 31093)
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
rebase_onto(pr, repo, 'viable/strict', False)
calls = [mock.call('fetch', 'origin', 'pull/31093/head:pull/31093/head'),
mock.call('rebase', 'refs/remotes/origin/viable/strict', 'pull/31093/head'),
mock.call('push', '-f', 'https://github.com/mingxiaoh/pytorch.git', 'pull/31093/head:master')]
mocked_run_git.assert_has_calls(calls)
self.assertTrue(
"Successfully rebased `master` onto `refs/remotes/origin/viable/strict`" in mocked_post_comment.call_args[0][3])
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
@mock.patch('gitutils.GitRepo._run_git', return_value="Everything up-to-date")
@mock.patch('tryrebase.gh_post_comment')
def test_no_need_to_rebase(self, mocked_post_comment: Any, mocked_run_git: Any, mocked_gql: Any) -> None:
"Tests branch already up to date"
pr = GitHubPR("pytorch", "pytorch", 31093)
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
rebase_onto(pr, repo, 'master')
calls = [mock.call('fetch', 'origin', 'pull/31093/head:pull/31093/head'),
mock.call('rebase', 'refs/remotes/origin/master', 'pull/31093/head'),
mock.call('push', '-f', 'https://github.com/mingxiaoh/pytorch.git', 'pull/31093/head:master')]
mocked_run_git.assert_has_calls(calls)
self.assertTrue(
"Tried to rebase and push PR #31093, but it was already up to date" in mocked_post_comment.call_args[0][3])
if __name__ == "__main__":
main()
| pytorch-master | .github/scripts/test_tryrebase.py |
#!/usr/bin/env python3
import base64
import json
import os
import re
import time
import urllib.parse
from dataclasses import dataclass
from datetime import datetime
from functools import lru_cache
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Pattern,
Tuple,
Union,
cast,
)
from urllib.error import HTTPError
from urllib.request import Request, urlopen
from warnings import warn
from gitutils import (
GitRepo,
get_git_remote_name,
get_git_repo_dir,
patterns_to_regex,
)
from trymerge_explainer import (
TryMergeExplainer,
get_land_check_troubleshooting_message,
get_revert_message,
)
GH_PR_REVIEWS_FRAGMENT = """
fragment PRReviews on PullRequestReviewConnection {
nodes {
author {
login
}
state
}
pageInfo {
startCursor
hasPreviousPage
}
}
"""
GH_CHECKSUITES_FRAGMENT = """
fragment PRCheckSuites on CheckSuiteConnection {
edges {
node {
app {
name
databaseId
}
workflowRun {
workflow {
name
}
}
checkRuns(first: 50) {
nodes {
name
conclusion
detailsUrl
}
pageInfo {
endCursor
hasNextPage
}
}
conclusion
url
}
cursor
}
pageInfo {
hasNextPage
}
}
"""
GH_COMMIT_AUTHORS_FRAGMENT = """
fragment CommitAuthors on PullRequestCommitConnection {
nodes {
commit {
author {
user {
login
}
email
name
}
oid
}
}
pageInfo {
endCursor
hasNextPage
}
}
"""
GH_GET_PR_INFO_QUERY = GH_PR_REVIEWS_FRAGMENT + GH_CHECKSUITES_FRAGMENT + GH_COMMIT_AUTHORS_FRAGMENT + """
query ($owner: String!, $name: String!, $number: Int!) {
repository(owner: $owner, name: $name) {
pullRequest(number: $number) {
closed
isCrossRepository
author {
login
}
title
body
headRefName
headRepository {
nameWithOwner
}
baseRefName
baseRepository {
nameWithOwner
isPrivate
defaultBranchRef {
name
}
}
mergeCommit {
oid
}
commits_with_authors: commits(first: 100) {
...CommitAuthors
totalCount
}
commits(last: 1) {
nodes {
commit {
checkSuites(first: 10) {
...PRCheckSuites
}
pushedDate
oid
}
}
}
changedFiles
files(first: 100) {
nodes {
path
}
pageInfo {
endCursor
hasNextPage
}
}
reviews(last: 100) {
...PRReviews
}
comments(last: 5) {
nodes {
bodyText
author {
login
}
authorAssociation
editor {
login
}
databaseId
}
pageInfo {
startCursor
hasPreviousPage
}
}
labels(first: 100) {
edges {
node {
name
}
}
}
}
}
}
"""
GH_GET_PR_NEXT_FILES_QUERY = """
query ($owner: String!, $name: String!, $number: Int!, $cursor: String!) {
repository(name: $name, owner: $owner) {
pullRequest(number: $number) {
files(first: 100, after: $cursor) {
nodes {
path
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
GH_GET_PR_NEXT_CHECKSUITES = GH_CHECKSUITES_FRAGMENT + """
query ($owner: String!, $name: String!, $number: Int!, $cursor: String!) {
repository(name: $name, owner: $owner) {
pullRequest(number: $number) {
commits(last: 1) {
nodes {
commit {
oid
checkSuites(first: 10, after: $cursor) {
...PRCheckSuites
}
}
}
}
}
}
}
"""
GH_GET_COMMIT_CHECKSUITES = GH_CHECKSUITES_FRAGMENT + """
query ($owner: String!, $name: String!, $commit: String) {
repository(name: $name, owner: $owner) {
object(expression: $commit) {
... on Commit {
checkSuites {
...PRCheckSuites
}
}
}
}
}
"""
GH_GET_COMMIT_NEXT_CHECKSUITES = GH_CHECKSUITES_FRAGMENT + """
query ($owner: String!, $name: String!, $commit: String, $cursor: String!) {
repository(name: $name, owner: $owner) {
object(expression: $commit) {
... on Commit {
oid
checkSuites(first: 10, after: $cursor) {
...PRCheckSuites
}
}
}
}
}
"""
GH_GET_COMMIT_NEXT_CHECK_RUNS = """
query ($owner: String!, $name: String!, $cs_cursor: String, $cr_cursor: String!, $commit: String) {
repository(name: $name, owner: $owner) {
object(expression: $commit) {
... on Commit {
oid
checkSuites(first: 1, after: $cs_cursor) {
nodes {
checkRuns(first: 100, after: $cr_cursor) {
nodes {
name
conclusion
detailsUrl
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
}
}
}
"""
GH_GET_PR_NEXT_CHECK_RUNS = """
query ($owner: String!, $name: String!, $number: Int!, $cs_cursor: String, $cr_cursor: String!) {
repository(name: $name, owner: $owner) {
pullRequest(number: $number) {
commits(last: 1) {
nodes {
commit {
oid
checkSuites(first: 1, after: $cs_cursor) {
nodes {
checkRuns(first: 100, after: $cr_cursor) {
nodes {
name
conclusion
detailsUrl
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
}
}
}
}
}
"""
GH_GET_PR_PREV_COMMENTS = """
query ($owner: String!, $name: String!, $number: Int!, $cursor: String!) {
repository(name: $name, owner: $owner) {
pullRequest(number: $number) {
comments(last: 100, before: $cursor) {
nodes {
bodyText
author {
login
}
authorAssociation
editor {
login
}
databaseId
}
pageInfo {
startCursor
hasPreviousPage
}
}
}
}
}
"""
# This query needs read-org permission
GH_GET_TEAM_MEMBERS_QUERY = """
query($org: String!, $name: String!, $cursor: String) {
organization(login: $org) {
team(slug: $name) {
members(first: 100, after: $cursor) {
nodes {
login
}
pageInfo {
hasNextPage
endCursor
}
}
}
}
}
"""
GH_GET_PR_NEXT_AUTHORS_QUERY = GH_COMMIT_AUTHORS_FRAGMENT + """
query ($owner: String!, $name: String!, $number: Int!, $cursor: String) {
repository(name: $name, owner: $owner) {
pullRequest(number: $number) {
commits_with_authors: commits(first: 100, after: $cursor) {
...CommitAuthors
}
}
}
}
"""
GH_GET_PR_PREV_REVIEWS_QUERY = GH_PR_REVIEWS_FRAGMENT + """
query ($owner: String!, $name: String!, $number: Int!, $cursor: String!) {
repository(name: $name, owner: $owner) {
pullRequest(number: $number) {
reviews(last: 100, before: $cursor) {
...PRReviews
}
}
}
}
"""
RE_GHSTACK_HEAD_REF = re.compile(r"^(gh/[^/]+/[0-9]+/)head$")
RE_GHSTACK_DESC = re.compile(r'Stack.*:\r?\n(\* [^\r\n]+\r?\n)+', re.MULTILINE)
RE_PULL_REQUEST_RESOLVED = re.compile(
r'Pull Request resolved: '
r'https://github.com/(?P<owner>[^/]+)/(?P<repo>[^/]+)/pull/(?P<number>[0-9]+)',
re.MULTILINE
)
RE_DIFF_REV = re.compile(r'^Differential Revision:.+?(D[0-9]+)', re.MULTILINE)
CIFLOW_LABEL = re.compile(r"^ciflow/.+")
CIFLOW_TRUNK_LABEL = re.compile(r"^ciflow/trunk")
def _fetch_url(url: str, *,
headers: Optional[Dict[str, str]] = None,
data: Optional[Dict[str, Any]] = None,
method: Optional[str] = None,
reader: Callable[[Any], Any] = lambda x: x.read()) -> Any:
if headers is None:
headers = {}
token = os.environ.get("GITHUB_TOKEN")
if token is not None and url.startswith('https://api.github.com/'):
headers['Authorization'] = f'token {token}'
data_ = json.dumps(data).encode() if data is not None else None
try:
with urlopen(Request(url, headers=headers, data=data_, method=method)) as conn:
return reader(conn)
except HTTPError as err:
if err.code == 403 and all(key in err.headers for key in ['X-RateLimit-Limit', 'X-RateLimit-Used']):
print(f"Rate limit exceeded: {err.headers['X-RateLimit-Used']}/{err.headers['X-RateLimit-Limit']}")
raise
def fetch_json(url: str,
params: Optional[Dict[str, Any]] = None,
data: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
headers = {'Accept': 'application/vnd.github.v3+json'}
if params is not None and len(params) > 0:
url += '?' + '&'.join(f"{name}={urllib.parse.quote(str(val))}" for name, val in params.items())
return cast(List[Dict[str, Any]], _fetch_url(url, headers=headers, data=data, reader=json.load))
def fetch_json_dict(url: str,
params: Optional[Dict[str, Any]] = None,
data: Optional[Dict[str, Any]] = None) -> Dict[str, Any] :
headers = {'Accept': 'application/vnd.github.v3+json'}
if params is not None and len(params) > 0:
url += '?' + '&'.join(f"{name}={urllib.parse.quote(str(val))}" for name, val in params.items())
return cast(Dict[str, Any], _fetch_url(url, headers=headers, data=data, reader=json.load))
def _gh_post_comment(url: str, comment: str, dry_run: bool = False) -> List[Dict[str, Any]]:
if dry_run:
print(comment)
return []
return fetch_json(url, data={"body": comment})
def gh_post_pr_comment(org: str, project: str, pr_num: int, comment: str, dry_run: bool = False) -> List[Dict[str, Any]]:
return _gh_post_comment(f'https://api.github.com/repos/{org}/{project}/issues/{pr_num}/comments', comment, dry_run)
def gh_post_commit_comment(org: str, project: str, sha: str, comment: str, dry_run: bool = False) -> List[Dict[str, Any]]:
return _gh_post_comment(f'https://api.github.com/repos/{org}/{project}/commits/{sha}/comments', comment, dry_run)
def gh_add_labels(org: str, project: str, pr_num: int, labels: Union[str, List[str]]) -> None:
fetch_json(f'https://api.github.com/repos/{org}/{project}/issues/{pr_num}/labels',
data={"labels": labels})
def gh_graphql(query: str, **kwargs: Any) -> Dict[str, Any]:
rc = _fetch_url("https://api.github.com/graphql", data={"query": query, "variables": kwargs}, reader=json.load)
if "errors" in rc:
raise RuntimeError(f"GraphQL query {query}, args {kwargs} failed: {rc['errors']}")
return cast(Dict[str, Any], rc)
def gh_get_pr_info(org: str, proj: str, pr_no: int) -> Any:
rc = gh_graphql(GH_GET_PR_INFO_QUERY, name=proj, owner=org, number=pr_no)
return rc["data"]["repository"]["pullRequest"]
def gh_get_land_check_info(org: str, proj: str, commit: str) -> Any:
rc = gh_graphql(GH_GET_COMMIT_CHECKSUITES, name=proj, owner=org, commit=commit)
return rc["data"]["repository"]["object"]
@lru_cache(maxsize=None)
def gh_get_team_members(org: str, name: str) -> List[str]:
rc: List[str] = []
team_members: Dict[str, Any] = {"pageInfo": {"hasNextPage": "true", "endCursor": None}}
while bool(team_members["pageInfo"]["hasNextPage"]):
query = gh_graphql(GH_GET_TEAM_MEMBERS_QUERY, org=org, name=name, cursor=team_members["pageInfo"]["endCursor"])
team = query["data"]["organization"]["team"]
if team is None:
warn(f"Requested non-existing team {org}/{name}")
return []
team_members = team["members"]
rc += [member["login"] for member in team_members["nodes"]]
return rc
def get_check_run_name_prefix(workflow_run: Any) -> str:
if workflow_run is None:
return ""
else:
return f'{workflow_run["workflow"]["name"]} / '
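# Walks the paginated checkSuites/checkRuns GraphQL structures and collects a mapping from
# workflow / check-run name to (conclusion, url), using the supplied callbacks to fetch further pages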
def add_workflow_conclusions(
checksuites: Any,
get_next_checkruns_page: Callable[[List[Dict[str, Dict[str, Any]]], int, Any], Any],
get_next_checksuites: Callable[[Any], Any]
) -> Dict[str, Tuple[str, str]]:
conclusions = {}
def add_conclusions(edges: Any) -> None:
for edge_idx, edge in enumerate(edges):
node = edge["node"]
workflow_run = node["workflowRun"]
checkruns = node["checkRuns"]
if workflow_run is not None:
workflow_name = workflow_run["workflow"]["name"]
workflow_conclusion = node["conclusion"]
# Do not override existing status with cancelled
if workflow_conclusion == "CANCELLED" and workflow_name in conclusions:
continue
conclusions[workflow_name] = (workflow_conclusion, node["url"])
has_failing_check = False
while checkruns is not None:
for checkrun_node in checkruns["nodes"]:
if not isinstance(checkrun_node, dict):
warn(f"Expected dictionary, but got {type(checkrun_node)}")
continue
if checkrun_node["conclusion"] == 'FAILURE':
has_failing_check = True
conclusions[f'{get_check_run_name_prefix(workflow_run)}{checkrun_node["name"]}'] = (
checkrun_node["conclusion"], checkrun_node["detailsUrl"]
)
if bool(checkruns["pageInfo"]["hasNextPage"]):
checkruns = get_next_checkruns_page(edges, edge_idx, checkruns)
else:
checkruns = None
# Github doesn't set conclusion to failure if a job is still pending
if workflow_run is not None and has_failing_check:
conclusions[workflow_run["workflow"]["name"]] = ("FAILURE", node["url"])
add_conclusions(checksuites["edges"])
while bool(checksuites["pageInfo"]["hasNextPage"]):
checksuites = get_next_checksuites(checksuites)
add_conclusions(checksuites["edges"])
return conclusions
def parse_args() -> Any:
from argparse import ArgumentParser
parser = ArgumentParser("Merge PR into default branch")
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--on-green", action="store_true")
parser.add_argument("--on-mandatory", action="store_true")
parser.add_argument("--land-checks", action="store_true")
parser.add_argument("--revert", action="store_true")
parser.add_argument("--force", action="store_true")
parser.add_argument("--comment-id", type=int)
parser.add_argument("--reason", type=str)
parser.add_argument("pr_num", type=int)
return parser.parse_args()
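# Internal checks may only be skipped when requested via an unedited comment posted by facebook-github-bot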
def can_skip_internal_checks(pr: "GitHubPR", comment_id: Optional[int] = None) -> bool:
if comment_id is None:
return False
comment = pr.get_comment_by_id(comment_id)
if comment.editor_login is not None:
return False
return comment.author_login == "facebook-github-bot"
@dataclass
class GitHubComment:
body_text: str
author_login: str
author_association: str
editor_login: Optional[str]
database_id: int
class GitHubPR:
def __init__(self, org: str, project: str, pr_num: int) -> None:
assert isinstance(pr_num, int)
self.org = org
self.project = project
self.pr_num = pr_num
self.info = gh_get_pr_info(org, project, pr_num)
self.changed_files: Optional[List[str]] = None
self.labels: Optional[List[str]] = None
self.conclusions: Optional[Dict[str, Tuple[str, str]]] = None
self.comments: Optional[List[GitHubComment]] = None
self._authors: Optional[List[Tuple[str, str]]] = None
self._reviews: Optional[List[Tuple[str, str]]] = None
def is_closed(self) -> bool:
return bool(self.info["closed"])
def is_cross_repo(self) -> bool:
return bool(self.info["isCrossRepository"])
def base_ref(self) -> str:
return cast(str, self.info["baseRefName"])
def default_branch(self) -> str:
return cast(str, self.info["baseRepository"]["defaultBranchRef"]["name"])
def head_ref(self) -> str:
return cast(str, self.info["headRefName"])
def is_ghstack_pr(self) -> bool:
return RE_GHSTACK_HEAD_REF.match(self.head_ref()) is not None
def is_base_repo_private(self) -> bool:
return bool(self.info["baseRepository"]["isPrivate"])
def get_changed_files_count(self) -> int:
return int(self.info["changedFiles"])
def last_pushed_at(self) -> datetime:
return datetime.fromisoformat(self.last_commit()['pushedDate'][:-1])
def last_commit(self) -> Any:
return self.info["commits"]["nodes"][-1]["commit"]
def get_changed_files(self) -> List[str]:
if self.changed_files is None:
info = self.info
self.changed_files = []
# Do not try to fetch more than 10K files
for _ in range(100):
self.changed_files += [x["path"] for x in info["files"]["nodes"]]
if not info["files"]["pageInfo"]["hasNextPage"]:
break
rc = gh_graphql(GH_GET_PR_NEXT_FILES_QUERY,
name=self.project,
owner=self.org,
number=self.pr_num,
cursor=info["files"]["pageInfo"]["endCursor"])
info = rc["data"]["repository"]["pullRequest"]
if len(self.changed_files) != self.get_changed_files_count():
raise RuntimeError("Changed file count mismatch")
return self.changed_files
def _get_reviews(self) -> List[Tuple[str, str]]:
if self._reviews is None:
self._reviews = []
info = self.info
for _ in range(100):
nodes = info["reviews"]["nodes"]
self._reviews = [(node["author"]["login"], node["state"]) for node in nodes] + self._reviews
if not info["reviews"]["pageInfo"]["hasPreviousPage"]:
break
rc = gh_graphql(GH_GET_PR_PREV_REVIEWS_QUERY,
name=self.project,
owner=self.org,
number=self.pr_num,
cursor=info["reviews"]["pageInfo"]["startCursor"])
info = rc["data"]["repository"]["pullRequest"]
reviews = {}
for (author, state) in self._reviews:
if state != "COMMENTED":
reviews[author] = state
return list(reviews.items())
def get_approved_by(self) -> List[str]:
return [login for (login, state) in self._get_reviews() if state == "APPROVED"]
def get_commit_count(self) -> int:
return int(self.info["commits_with_authors"]["totalCount"])
def get_pr_creator_login(self) -> str:
return cast(str, self.info["author"]["login"])
def _fetch_authors(self) -> List[Tuple[str, str]]:
if self._authors is not None:
return self._authors
authors: List[Tuple[str, str]] = []
def add_authors(info: Dict[str, Any]) -> None:
for node in info["commits_with_authors"]["nodes"]:
author_node = node["commit"]["author"]
user_node = author_node["user"]
author = f"{author_node['name']} <{author_node['email']}>"
if user_node is None:
# If author is not github user, user node will be null
authors.append(("", author))
else:
authors.append((cast(str, user_node["login"]), author))
info = self.info
for _ in range(100):
add_authors(info)
if not info["commits_with_authors"]["pageInfo"]["hasNextPage"]:
break
rc = gh_graphql(GH_GET_PR_NEXT_AUTHORS_QUERY,
name=self.project,
owner=self.org,
number=self.pr_num,
cursor=info["commits_with_authors"]["pageInfo"]["endCursor"])
info = rc["data"]["repository"]["pullRequest"]
self._authors = authors
return authors
def get_committer_login(self, num: int = 0) -> str:
return self._fetch_authors()[num][0]
def get_committer_author(self, num: int = 0) -> str:
return self._fetch_authors()[num][1]
def get_labels(self) -> List[str]:
if self.labels is not None:
return self.labels
labels = [node['node']['name'] for node in self.info["labels"]["edges"]] if "labels" in self.info else []
self.labels = labels
return self.labels
def get_checkrun_conclusions(self) -> Dict[str, Tuple[str, str]]:
""" Returns dict of checkrun -> [conclusion, url] """
if self.conclusions is not None:
return self.conclusions
orig_last_commit = self.info["commits"]["nodes"][-1]["commit"]
def get_pr_next_check_runs(edges: List[Dict[str, Dict[str, Any]]], edge_idx: int, checkruns: Any) -> Any:
rc = gh_graphql(GH_GET_PR_NEXT_CHECK_RUNS,
name=self.project,
owner=self.org,
number=self.pr_num,
cs_cursor=edges[edge_idx - 1]["cursor"] if edge_idx > 0 else None,
cr_cursor=checkruns["pageInfo"]["endCursor"])
last_commit = rc["data"]["repository"]["pullRequest"]["commits"]["nodes"][-1]["commit"]
checkruns = last_commit["checkSuites"]["nodes"][-1]["checkRuns"]
return checkruns
def get_pr_next_checksuites(checksuites: Any) -> Any:
rc = gh_graphql(GH_GET_PR_NEXT_CHECKSUITES,
name=self.project,
owner=self.org,
number=self.pr_num,
cursor=checksuites["edges"][-1]["cursor"])
info = rc["data"]["repository"]["pullRequest"]
last_commit = info["commits"]["nodes"][-1]["commit"]
if last_commit["oid"] != orig_last_commit["oid"]:
raise RuntimeError("Last commit changed on PR")
return last_commit["checkSuites"]
checksuites = orig_last_commit["checkSuites"]
self.conclusions = add_workflow_conclusions(checksuites, get_pr_next_check_runs, get_pr_next_checksuites)
return self.conclusions
def get_authors(self) -> Dict[str, str]:
rc = {}
# TODO: replace with `self.get_commit_count()` when GraphQL pagination can be used
# to fetch all commits, see https://gist.github.com/malfet/4f35321b0c9315bcd7116c7b54d83372
# and https://support.github.com/ticket/enterprise/1642/1659119
if self.get_commit_count() <= 250:
assert len(self._fetch_authors()) == self.get_commit_count()
for idx in range(len(self._fetch_authors())):
rc[self.get_committer_login(idx)] = self.get_committer_author(idx)
return rc
def get_author(self) -> str:
authors = self.get_authors()
if len(authors) == 1:
return next(iter(authors.values()))
creator = self.get_pr_creator_login()
# If PR creator is not among authors
# Assume it was authored by first commit author
if creator not in authors:
return self.get_committer_author(0)
return authors[creator]
def get_title(self) -> str:
return cast(str, self.info["title"])
def get_body(self) -> str:
return cast(str, self.info["body"])
def get_merge_commit(self) -> Optional[str]:
mc = self.info["mergeCommit"]
return mc["oid"] if mc is not None else None
def get_pr_url(self) -> str:
return f"https://github.com/{self.org}/{self.project}/pull/{self.pr_num}"
@staticmethod
def _comment_from_node(node: Any) -> GitHubComment:
editor = node["editor"]
return GitHubComment(body_text=node["bodyText"],
author_login=node["author"]["login"],
author_association=node["authorAssociation"],
editor_login=editor["login"] if editor else None,
database_id=node["databaseId"]
)
def get_comments(self) -> List[GitHubComment]:
if self.comments is not None:
return self.comments
self.comments = []
info = self.info["comments"]
# Do not try to fetch more than 10K comments
for _ in range(100):
self.comments = [self._comment_from_node(node) for node in info["nodes"]] + self.comments
if not info["pageInfo"]["hasPreviousPage"]:
break
rc = gh_graphql(GH_GET_PR_PREV_COMMENTS,
name=self.project,
owner=self.org,
number=self.pr_num,
cursor=info["pageInfo"]["startCursor"])
info = rc["data"]["repository"]["pullRequest"]["comments"]
return self.comments
def get_last_comment(self) -> GitHubComment:
return self._comment_from_node(self.info["comments"]["nodes"][-1])
def get_comment_by_id(self, database_id: int) -> GitHubComment:
if self.comments is None:
# Fastpath - try searching in partial prefetched comments
for node in self.info["comments"]["nodes"]:
comment = self._comment_from_node(node)
if comment.database_id == database_id:
return comment
for comment in self.get_comments():
if comment.database_id == database_id:
return comment
raise RuntimeError(f"Comment with id {database_id} not found")
def get_diff_revision(self) -> Optional[str]:
rc = RE_DIFF_REV.search(self.get_body())
return rc.group(1) if rc is not None else None
def has_internal_changes(self) -> bool:
checkrun_name = "Meta Internal-Only Changes Check"
if self.get_diff_revision() is None:
return False
checks = self.get_checkrun_conclusions()
if checks is None or checkrun_name not in checks:
return False
return checks[checkrun_name][0] != "SUCCESS"
def merge_ghstack_into(self, repo: GitRepo, force: bool, comment_id: Optional[int] = None) -> None:
assert self.is_ghstack_pr()
        # For ghstack, cherry-pick commits from the corresponding /orig ref on the remote
orig_ref = f"{repo.remote}/{re.sub(r'/head$', '/orig', self.head_ref())}"
rev_list = repo.revlist(f"{self.default_branch()}..{orig_ref}")
for idx, rev in enumerate(reversed(rev_list)):
msg = repo.commit_message(rev)
m = RE_PULL_REQUEST_RESOLVED.search(msg)
if m is None:
raise RuntimeError(f"Could not find PR-resolved string in {msg} of ghstacked PR {self.pr_num}")
if self.org != m.group('owner') or self.project != m.group('repo'):
raise RuntimeError(f"PR {m.group('number')} resolved to wrong owner/repo pair")
pr_num = int(m.group('number'))
commit_msg = self.gen_commit_message(filter_ghstack=True)
if pr_num != self.pr_num:
pr = GitHubPR(self.org, self.project, pr_num)
if pr.is_closed():
print(f"Skipping {idx+1} of {len(rev_list)} PR (#{pr_num}) as its already been merged")
continue
commit_msg = pr.gen_commit_message(filter_ghstack=True)
# Raises exception if matching rule is not found
find_matching_merge_rule(pr, repo, force=force, skip_internal_checks=can_skip_internal_checks(self, comment_id))
repo.cherry_pick(rev)
repo.amend_commit_message(commit_msg)
def gen_commit_message(self, filter_ghstack: bool = False) -> str:
""" Fetches title and body from PR description
adds reviewed by, pull request resolved and optionally
filters out ghstack info """
# Adding the url here makes it clickable within the Github UI
approved_by_urls = ', '.join(prefix_with_github_url(login) for login in self.get_approved_by())
msg = self.get_title() + f" (#{self.pr_num})\n\n"
msg += self.get_body() if not filter_ghstack else re.sub(RE_GHSTACK_DESC, "", self.get_body())
msg += f"\nPull Request resolved: {self.get_pr_url()}\n"
msg += f"Approved by: {approved_by_urls}\n"
return msg
def merge_into(self, repo: GitRepo, *,
force: bool = False,
dry_run: bool = False,
comment_id: Optional[int] = None) -> None:
# Raises exception if matching rule is not found
find_matching_merge_rule(self, repo, force=force, skip_internal_checks=can_skip_internal_checks(self, comment_id))
self.merge_changes(repo, force, comment_id)
repo.push(self.default_branch(), dry_run)
if not dry_run:
gh_add_labels(self.org, self.project, self.pr_num, ["merged"])
def merge_changes(self,
repo: GitRepo,
force: bool = False,
comment_id: Optional[int] = None,
branch: Optional[str] = None) -> None:
branch_to_merge_into = self.default_branch() if branch is None else branch
if repo.current_branch() != branch_to_merge_into:
repo.checkout(branch_to_merge_into)
if not self.is_ghstack_pr():
msg = self.gen_commit_message()
pr_branch_name = f"__pull-request-{self.pr_num}__init__"
repo.fetch(f"pull/{self.pr_num}/head", pr_branch_name)
repo._run_git("merge", "--squash", pr_branch_name)
repo._run_git("commit", f"--author=\"{self.get_author()}\"", "-m", msg)
else:
self.merge_ghstack_into(repo, force, comment_id=comment_id)
def create_land_time_check_branch(self,
repo: GitRepo,
branch: str,
force: bool = False,
comment_id: Optional[int] = None,) -> str:
self.merge_changes(repo, branch=branch, force=force, comment_id=comment_id)
land_check_branch = f'landchecks/{self.pr_num}'
try:
repo._run_git('branch', "-D", land_check_branch)
except Exception:
pass
repo._run_git('checkout', "-b", land_check_branch)
repo._run_git('push', '-u', 'origin', land_check_branch, '--force')
commit = repo.get_commit('HEAD').commit_hash
return commit
class MandatoryChecksMissingError(Exception):
pass
class PostCommentError(Exception):
pass
@dataclass
class MergeRule:
name: str
patterns: List[str]
approved_by: List[str]
mandatory_checks_name: Optional[List[str]]
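# Merge rules live in .github/merge_rules.json; they are read via the GitHub contents API when no
# local checkout is available, otherwise from the repository working tree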
def read_merge_rules(repo: Optional[GitRepo], org: str, project: str) -> List[MergeRule]:
from pathlib import Path
repo_relative_rules_path = Path(".github") / "merge_rules.json"
if repo is None:
json_data = _fetch_url(
f"https://api.github.com/repos/{org}/{project}/contents/{repo_relative_rules_path}",
headers={'Accept': 'application/vnd.github.v3+json'},
reader=json.load,
)
content = base64.b64decode(json_data["content"])
return cast(List[MergeRule], json.loads(content, object_hook=lambda x: MergeRule(**x)))
else:
rules_path = Path(repo.repo_dir) / repo_relative_rules_path
if not rules_path.exists():
print(f"{rules_path} does not exist, returning empty rules")
return []
with open(rules_path) as fp:
rc = json.load(fp, object_hook=lambda x: MergeRule(**x))
return cast(List[MergeRule], rc)
def find_matching_merge_rule(pr: GitHubPR,
repo: Optional[GitRepo] = None,
force: bool = False,
skip_internal_checks: bool = False
) -> MergeRule:
"""Returns merge rule matching to this pr or raises an exception"""
changed_files = pr.get_changed_files()
approved_by = set(pr.get_approved_by())
rules = read_merge_rules(repo, pr.org, pr.project)
reject_reason = f"PR {pr.pr_num} does not match merge rules"
# Used to determine best rejection reason
# Score 0 to 10K - how many files rule matched
# Score 10K - matched all files, but no overlapping approvers
# Score 20K - matched all files and approvers, but mandatory checks are pending
# Score 30k - Matched all files and approvers, but mandatory checks failed
reject_reason_score = 0
for rule in rules:
rule_name = rule.name
patterns_re = patterns_to_regex(rule.patterns)
non_matching_files = []
for fname in changed_files:
if not patterns_re.match(fname):
non_matching_files.append(fname)
if len(non_matching_files) > 0:
num_matching_files = len(changed_files) - len(non_matching_files)
if num_matching_files > reject_reason_score:
reject_reason_score = num_matching_files
reject_reason = (f"{num_matching_files} files matched rule {rule_name}, but there are still non-matching files: " +
f"{','.join(non_matching_files[:5])}{', ...' if len(non_matching_files) > 5 else ''}")
continue
# If rule needs approvers but PR has not been reviewed, skip it
if len(rule.approved_by) > 0 and len(approved_by) == 0:
if reject_reason_score < 10000:
reject_reason_score = 10000
reject_reason = f"Matched rule {rule_name}, but PR #{pr.pr_num} has not been reviewed yet"
continue
rule_approvers_set = set()
for approver in rule.approved_by:
if "/" in approver:
org, name = approver.split("/")
rule_approvers_set.update(gh_get_team_members(org, name))
else:
rule_approvers_set.add(approver)
approvers_intersection = approved_by.intersection(rule_approvers_set)
# If rule requires approvers but they aren't the ones that reviewed PR
if len(approvers_intersection) == 0 and len(rule_approvers_set) > 0:
if reject_reason_score < 10000:
reject_reason_score = 10000
reject_reason = (f"Matched rule {rule_name}, but PR #{pr.pr_num} was not reviewed yet by any of: " +
f"{', '.join(list(rule_approvers_set)[:5])}{', ...' if len(rule_approvers_set) > 5 else ''}")
continue
mandatory_checks = rule.mandatory_checks_name if rule.mandatory_checks_name is not None else []
checks = pr.get_checkrun_conclusions()
required_checks = filter(lambda x: force is False or "CLA Check" in x, mandatory_checks)
[pending_checks, failed_checks] = categorize_checks(checks, required_checks)
if len(failed_checks) > 0:
if reject_reason_score < 30000:
reject_reason_score = 30000
reject_reason = ("Refusing to merge as mandatory check(s) " +
checks_to_str(failed_checks) + f" failed for rule {rule_name}")
continue
elif len(pending_checks) > 0:
if reject_reason_score < 20000:
reject_reason_score = 20000
reject_reason = f"Refusing to merge as mandatory check(s) {checks_to_str(pending_checks)}"
reject_reason += f" are pending/not yet run for rule {rule_name}"
continue
if not skip_internal_checks and pr.has_internal_changes():
raise RuntimeError("This PR has internal changes and must be landed via Phabricator")
return rule
if reject_reason_score == 20000:
raise MandatoryChecksMissingError(reject_reason)
raise RuntimeError(reject_reason)
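# Collects check-run conclusions for a given commit (used for the land-time checks pushed to the
# landchecks/<pr_num> branch), reusing add_workflow_conclusions for pagination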
def get_land_checkrun_conclusions(org: str, project: str, commit: str) -> Dict[str, Tuple[str, str]]:
def get_commit_next_check_runs(edges: List[Dict[str, Dict[str, Any]]], edge_idx: int, checkruns: Any) -> Any:
rc = gh_graphql(GH_GET_COMMIT_NEXT_CHECK_RUNS,
name=project,
owner=org,
cs_cursor=edges[edge_idx - 1]["cursor"] if edge_idx > 0 else None,
cr_cursor=checkruns["pageInfo"]["endCursor"],
commit=commit)
return rc["data"]["repository"]["object"]["checkSuites"]["nodes"][-1]["checkRuns"]
def get_commit_next_checksuites(checksuites: Any) -> Any:
rc = gh_graphql(GH_GET_COMMIT_NEXT_CHECKSUITES,
name=project,
owner=org,
commit=commit,
cursor=checksuites["edges"][-1]["cursor"])
info = rc["data"]["repository"]["object"]
return info["checkSuites"]
land_check_info = gh_get_land_check_info(org, project, commit)
checksuites = land_check_info["checkSuites"]
return add_workflow_conclusions(checksuites, get_commit_next_check_runs, get_commit_next_checksuites)
def checks_to_str(checks: List[Tuple[str, Optional[str]]]) -> str:
return ", ".join(f"[{c[0]}]({c[1]})" if c[1] is not None else c[0] for c in checks)
def pr_get_checks_with_lambda(pr: GitHubPR, status_check: Callable[[Optional[str]], bool]) -> List[Tuple[str, str]]:
checks = pr.get_checkrun_conclusions()
return [(name, status[1]) for name, status in checks.items() if status_check(status[0])]
def pr_get_pending_checks(pr: GitHubPR) -> List[Tuple[str, str]]:
return pr_get_checks_with_lambda(pr, lambda x: x is None)
def pr_get_failed_checks(pr: GitHubPR) -> List[Tuple[str, str]]:
return pr_get_checks_with_lambda(pr, lambda x: x in ["FAILURE", "STARTUP_FAILURE"])
def validate_revert(repo: GitRepo, pr: GitHubPR, *,
comment_id: Optional[int] = None) -> Tuple[str, str]:
comment = pr.get_last_comment() if comment_id is None else pr.get_comment_by_id(comment_id)
if comment.editor_login is not None:
raise PostCommentError("Don't want to revert based on edited command")
author_association = comment.author_association
author_login = comment.author_login
allowed_reverters = ["COLLABORATOR", "MEMBER", "OWNER"]
    # For some reason one cannot be a MEMBER of a private repo, only a CONTRIBUTOR
if pr.is_base_repo_private():
allowed_reverters.append("CONTRIBUTOR")
if author_association not in allowed_reverters:
raise PostCommentError((
f"Will not revert as @{author_login} is not one of "
f"[{', '.join(allowed_reverters)}], but instead is {author_association}."
))
skip_internal_checks = can_skip_internal_checks(pr, comment_id)
# Raises exception if matching rule is not found, but ignores all status checks
find_matching_merge_rule(pr, repo, force=True, skip_internal_checks=skip_internal_checks)
commit_sha = pr.get_merge_commit()
if commit_sha is None:
commits = repo.commits_resolving_gh_pr(pr.pr_num)
if len(commits) == 0:
raise PostCommentError("Can't find any commits resolving PR")
commit_sha = commits[0]
msg = repo.commit_message(commit_sha)
rc = RE_DIFF_REV.search(msg)
    if rc is not None and not skip_internal_checks:
raise PostCommentError(f"Can't revert PR that was landed via phabricator as {rc.group(1)}")
return (author_login, commit_sha)
def try_revert(repo: GitRepo, pr: GitHubPR, *,
dry_run: bool = False,
comment_id: Optional[int] = None,
reason: Optional[str] = None) -> None:
def post_comment(msg: str) -> None:
gh_post_pr_comment(pr.org, pr.project, pr.pr_num, msg, dry_run=dry_run)
try:
author_login, commit_sha = validate_revert(repo, pr, comment_id=comment_id)
except PostCommentError as e:
return post_comment(str(e))
revert_msg = f"\nReverted {pr.get_pr_url()} on behalf of {prefix_with_github_url(author_login)}"
revert_msg += f" due to {reason}\n" if reason is not None else "\n"
repo.checkout(pr.default_branch())
repo.revert(commit_sha)
msg = repo.commit_message("HEAD")
msg = re.sub(RE_PULL_REQUEST_RESOLVED, "", msg)
msg += revert_msg
repo.amend_commit_message(msg)
repo.push(pr.default_branch(), dry_run)
post_comment(f"@{pr.get_pr_creator_login()} your PR has been successfully reverted.")
if not dry_run:
gh_add_labels(pr.org, pr.project, pr.pr_num, ["reverted"])
gh_post_commit_comment(pr.org, pr.project, commit_sha, revert_msg)
def prefix_with_github_url(suffix_str: str) -> str:
return f"https://github.com/{suffix_str}"
def check_for_sev(org: str, project: str, force: bool) -> None:
if force:
return
response = cast(
Dict[str, Any],
fetch_json(
"https://api.github.com/search/issues",
params={"q": f'repo:{org}/{project} is:open is:issue label:"ci: sev"'},
),
)
if response["total_count"] != 0:
for item in response["items"]:
if "merge blocking" in item["body"].lower():
raise RuntimeError(
"Not merging any PRs at the moment because there is a "
+ "merge blocking https://github.com/pytorch/pytorch/labels/ci:%20sev issue open at: \n"
+ f"{item['html_url']}"
)
return
def validate_land_time_checks(org: str, project: str, commit: str) -> None:
checks = get_land_checkrun_conclusions(org, project, commit)
if len(checks) == 0:
raise MandatoryChecksMissingError("Refusing to merge as land check(s) are not yet run")
[pending_checks, failed_checks] = categorize_checks(checks, checks)
if len(failed_checks) > 0:
raise RuntimeError(f"Failed to merge; some land checks failed: {checks_to_str(failed_checks)}")
if len(pending_checks) > 0:
raise MandatoryChecksMissingError(f"Refusing to merge as land check(s) {checks_to_str(pending_checks)} are not yet run")
def has_label(labels: List[str], pattern: Pattern[str] = CIFLOW_LABEL) -> bool:
return len(list(filter(pattern.match, labels))) > 0
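# Splits the required checks into (pending, failed): a check is pending when it has no conclusion yet
# (or has not been run at all), and failed unless it concluded SUCCESS, SKIPPED or NEUTRAL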
def categorize_checks(check_runs: Dict[str, Tuple[str, str]],
required_checks: Iterable[str]) -> Tuple[List[Tuple[str, Optional[str]]], List[Tuple[str, Optional[str]]]]:
pending_checks: List[Tuple[str, Optional[str]]] = []
failed_checks: List[Tuple[str, Optional[str]]] = []
for checkname in required_checks:
if checkname not in check_runs:
pending_checks.append((checkname, None))
elif check_runs[checkname][0] is None:
pending_checks.append((checkname, check_runs[checkname][1]))
elif (check_runs[checkname][0].upper() != 'SUCCESS'
and check_runs[checkname][0].upper() != 'SKIPPED'
and check_runs[checkname][0].upper() != 'NEUTRAL'):
failed_checks.append((checkname, check_runs[checkname][1]))
return (pending_checks, failed_checks)
def merge(pr_num: int, repo: GitRepo,
dry_run: bool = False,
force: bool = False,
comment_id: Optional[int] = None,
mandatory_only: bool = False,
on_green: bool = False,
land_checks: bool = False,
timeout_minutes: int = 400,
stale_pr_days: int = 3) -> None:
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
org, project = repo.gh_owner_and_name()
pr = GitHubPR(org, project, pr_num)
initial_commit_sha = pr.last_commit()['oid']
explainer = TryMergeExplainer(force, on_green, land_checks, pr.get_labels(), pr.pr_num, org, project)
on_green, land_checks = explainer.get_flags()
land_check_commit = None
check_for_sev(org, project, force)
if force or can_skip_internal_checks(pr, comment_id):
# do not wait for any pending signals if PR is closed as part of co-development process
gh_post_pr_comment(org, project, pr.pr_num, explainer.get_merge_message())
return pr.merge_into(repo, dry_run=dry_run, force=force, comment_id=comment_id)
if land_checks:
land_check_commit = pr.create_land_time_check_branch(repo, 'viable/strict', force=force, comment_id=comment_id)
gh_post_pr_comment(org, project, pr.pr_num, explainer.get_merge_message(land_check_commit))
if (datetime.utcnow() - pr.last_pushed_at()).days > stale_pr_days:
raise RuntimeError("This PR is too stale; the last push date was more than 3 days ago. Please rebase and try again.")
start_time = time.time()
last_exception = ''
elapsed_time = 0.0
while elapsed_time < timeout_minutes * 60:
check_for_sev(org, project, force)
current_time = time.time()
elapsed_time = current_time - start_time
print(f"Attempting merge of https://github.com/{org}/{project}/pull/{pr_num} ({elapsed_time / 60} minutes elapsed)")
pr = GitHubPR(org, project, pr_num)
if initial_commit_sha != pr.last_commit()['oid']:
raise RuntimeError("New commits were pushed while merging. Please rerun the merge command.")
try:
find_matching_merge_rule(pr, repo)
pending = pr_get_pending_checks(pr)
failing = pr_get_failed_checks(pr)
            # HACK until GitHub becomes better about surfacing those
            startup_failures = pr_get_checks_with_lambda(pr, lambda x: x == "STARTUP_FAILURE")
            if len(startup_failures) > 0:
                raise RuntimeError(f"{len(startup_failures)} STARTUP failures reported, please check workflows syntax! " +
                                   ', '.join(f"[{x[0]}]({x[1]})" for x in startup_failures[:5]))
# END of HACK
            if (not mandatory_only and on_green) and len(failing) > 0:
                raise RuntimeError(f"{len(failing)} additional jobs have failed, first few of them are: " +
                                   ', '.join(f"[{x[0]}]({x[1]})" for x in failing[:5]))
            if (not mandatory_only and on_green) and len(pending) > 0:
                raise MandatoryChecksMissingError(f"Still waiting for {len(pending)} additional jobs to finish, " +
                                                  f"first few of them are: {', '.join(x[0] for x in pending[:5])}")
if land_checks and land_check_commit is not None:
validate_land_time_checks(org, project, land_check_commit)
return pr.merge_into(repo, dry_run=dry_run, force=force, comment_id=comment_id)
except MandatoryChecksMissingError as ex:
last_exception = str(ex)
print(f"Merge of https://github.com/{org}/{project}/pull/{pr_num} failed due to: {ex}. Retrying in 5 min")
time.sleep(5 * 60)
# Finally report timeout back
msg = f"Merged timed out after {timeout_minutes} minutes. Please contact the pytorch_dev_infra team."
msg += f"The last exception was: {last_exception}"
if not dry_run:
gh_add_labels(org, project, pr_num, ["land-failed"])
raise RuntimeError(msg)
def main() -> None:
args = parse_args()
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
org, project = repo.gh_owner_and_name()
pr = GitHubPR(org, project, args.pr_num)
def handle_exception(e: Exception, msg: str = "Merge failed") -> None:
msg += f" due to {e}"
run_url = os.getenv("GH_RUN_URL")
if run_url is not None:
msg += f"\nRaised by {run_url}"
if args.land_checks:
msg += get_land_check_troubleshooting_message()
gh_post_pr_comment(org, project, args.pr_num, msg, dry_run=args.dry_run)
import traceback
traceback.print_exc()
if args.revert:
try:
gh_post_pr_comment(org, project, args.pr_num, get_revert_message(org, project, pr.pr_num), args.dry_run)
try_revert(repo, pr, dry_run=args.dry_run, comment_id=args.comment_id, reason=args.reason)
except Exception as e:
handle_exception(e, f"Reverting PR {args.pr_num} failed")
return
if pr.is_closed():
gh_post_pr_comment(org, project, args.pr_num, f"Can't merge closed PR #{args.pr_num}", dry_run=args.dry_run)
return
if pr.is_cross_repo() and pr.is_ghstack_pr():
gh_post_pr_comment(org, project, args.pr_num, "Cross-repo ghstack merges are not supported", dry_run=args.dry_run)
return
try:
merge(args.pr_num, repo,
dry_run=args.dry_run,
force=args.force,
comment_id=args.comment_id,
on_green=args.on_green,
mandatory_only=args.on_mandatory,
land_checks=args.land_checks)
except Exception as e:
handle_exception(e)
if __name__ == "__main__":
main()
| pytorch-master | .github/scripts/trymerge.py |
import os
import re
from typing import List, Pattern, Tuple, Optional
BOT_COMMANDS_WIKI = "https://github.com/pytorch/pytorch/wiki/Bot-commands"
CIFLOW_LABEL = re.compile(r"^ciflow/.+")
CIFLOW_TRUNK_LABEL = re.compile(r"^ciflow/trunk")
OFFICE_HOURS_LINK = "https://github.com/pytorch/pytorch/wiki/Dev-Infra-Office-Hours"
CONTACT_US = f"Please reach out to the [PyTorch DevX Team]({OFFICE_HOURS_LINK}) with feedback or questions!"
ALTERNATIVES = (
"If this is not the intended behavior, feel free to use some "
+ f"of the other merge options in the [wiki]({BOT_COMMANDS_WIKI})."
)
LAND_CHECK_ROLLOUT = "https://github.com/pytorch/test-infra/blob/main/torchci/lib/bot/rolloutUtils.ts#L1-L34"
def has_label(labels: List[str], pattern: Pattern[str] = CIFLOW_LABEL) -> bool:
return len(list(filter(pattern.match, labels))) > 0
class TryMergeExplainer(object):
force: bool
on_green: bool
land_checks: bool
labels: List[str]
pr_num: int
org: str
project: str
has_trunk_label: bool
has_ciflow_label: bool
def __init__(
self,
force: bool,
on_green: bool,
land_checks: bool,
labels: List[str],
pr_num: int,
org: str,
project: str,
):
self.force = force
self.on_green = on_green
self.land_checks = land_checks
self.labels = labels
self.pr_num = pr_num
self.org = org
self.project = project
self.get_flags()
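    # Compute the effective flags: green checks are required when -g was passed or the PR carries any
    # ciflow/* label, and the land-check branch is only used when the PR does not already have ciflow/trunk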
def get_flags(self) -> Tuple[bool, bool]:
self.has_trunk_label = has_label(self.labels, CIFLOW_TRUNK_LABEL)
self.has_ciflow_label = has_label(self.labels, CIFLOW_LABEL)
should_check_land_branch = self.land_checks and not self.has_trunk_label
should_check_green = self.on_green or self.has_ciflow_label
return (should_check_green, should_check_land_branch)
def _get_flag_msg(self) -> str:
if self.force:
return " the force (-f) flag."
elif self.on_green:
return " the green (-g) flag."
elif self.land_checks:
return (
" the land checks (-l) flag."
+ " If you did not specify this flag yourself, "
+ f" you are likely enrolled in the [land checks rollout]({LAND_CHECK_ROLLOUT})."
)
else:
return "out a flag."
def _get_land_check_progress(self, commit: Optional[str]) -> str:
if commit is not None:
return (
" and land check "
+ f"progress [here](https://hud.pytorch.org/{self.org}/{self.project}/commit/{commit})"
)
else:
return ""
def _get_flag_explanation_message(self) -> str:
if self.force:
return "This means your change will be merged **immediately**, bypassing any CI checks (ETA: 1-5 minutes)."
elif self.on_green:
return "This means that your change will be merged once all checks on your PR have passed (ETA: 0-4 Hours)."
elif self.land_checks:
if self.has_trunk_label:
land_check_msg_suffix = "have passed since you have added the `ciflow/trunk` label to your PR (ETA 0-4 Hours)."
else:
land_check_msg_suffix = (
"and the land checks have passed (**ETA 4 Hours**). "
)
land_check_msg_suffix += "If you need to coordinate lands between different changes and cannot risk a land race, "
land_check_msg_suffix += "please add the `ciflow/trunk` label to your PR and wait for signal to complete, "
land_check_msg_suffix += "and then land your changes in proper order."
land_check_msg_suffix += (
" Having `trunk`, `pull`, and `Lint` pre-run on a "
)
land_check_msg_suffix += (
"PR will bypass land checks and the ETA should be immediate."
)
return (
"This means that your change will be merged once all checks on your PR "
+ land_check_msg_suffix
)
else:
return "This means that your change will be merged once all checks on your PR have passed (ETA: 0-4 Hours)."
def get_merge_message(self, commit: Optional[str] = None) -> str:
message_prefix = "@pytorchbot successfully started a merge job."
progress_links = f"Check the current status [here]({os.getenv('GH_RUN_URL')}){self._get_land_check_progress(commit)}."
flag_message = f"The merge job was triggered with{self._get_flag_msg()}"
explanation_message = self._get_flag_explanation_message()
msg = message_prefix + " "
msg += progress_links + "\n"
msg += flag_message + " "
msg += explanation_message + " "
msg += ALTERNATIVES + "\n"
msg += CONTACT_US
return msg
def get_revert_message(org: str, project: str, pr_num: int) -> str:
msg = (
"@pytorchbot successfully started a revert job."
+ f" Check the current status [here]({os.getenv('GH_RUN_URL')}).\n"
)
msg += CONTACT_US
return msg
def get_land_check_troubleshooting_message() -> str:
return (
" If you believe this is an error, you can use the old behavior with `@pytorchbot merge -g`"
+ " (optionally with the `ciflow/trunk` to get land checks)"
+ ' or use `@pytorchbot merge -f "some reason here"`.'
+ f" For more information, see the [bot wiki]({BOT_COMMANDS_WIKI}). \n"
+ CONTACT_US
)
| pytorch-master | .github/scripts/trymerge_explainer.py |
#!/usr/bin/env python3
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import Dict, Set, List, Iterable
import jinja2
import os
import sys
from typing_extensions import Literal, TypedDict
import generate_binary_build_matrix # type: ignore[import]
Arch = Literal["windows", "linux", "macos"]
GITHUB_DIR = Path(__file__).resolve().parent.parent
LABEL_CIFLOW_TRUNK = "ciflow/trunk"
LABEL_CIFLOW_BINARIES = "ciflow/binaries"
LABEL_CIFLOW_PERIODIC = "ciflow/periodic"
LABEL_CIFLOW_BINARIES_LIBTORCH = "ciflow/binaries_libtorch"
LABEL_CIFLOW_BINARIES_CONDA = "ciflow/binaries_conda"
LABEL_CIFLOW_BINARIES_WHEEL = "ciflow/binaries_wheel"
@dataclass
class CIFlowConfig:
# For use to enable workflows to run on pytorch/pytorch-canary
run_on_canary: bool = False
labels: Set[str] = field(default_factory=set)
# Certain jobs might not want to be part of the ciflow/[all,trunk] workflow
isolated_workflow: bool = False
def __post_init__(self) -> None:
if not self.isolated_workflow:
if LABEL_CIFLOW_PERIODIC not in self.labels:
self.labels.add(LABEL_CIFLOW_TRUNK)
class Config(TypedDict):
num_shards: int
runner: str
@dataclass
class BinaryBuildWorkflow:
os: str
build_configs: List[Dict[str, str]]
package_type: str
# Optional fields
build_environment: str = ''
abi_version: str = ''
ciflow_config: CIFlowConfig = field(default_factory=CIFlowConfig)
is_scheduled: str = ''
branches: str = 'nightly'
# Mainly for macos
cross_compile_arm64: bool = False
xcode_version: str = ''
def __post_init__(self) -> None:
if self.abi_version:
self.build_environment = f"{self.os}-binary-{self.package_type}-{self.abi_version}"
else:
self.build_environment = f"{self.os}-binary-{self.package_type}"
def generate_workflow_file(self, workflow_template: jinja2.Template) -> None:
output_file_path = GITHUB_DIR / f"workflows/generated-{self.build_environment}-{self.branches}.yml"
with open(output_file_path, "w") as output_file:
GENERATED = "generated" # Note that please keep the variable GENERATED otherwise phabricator will hide the whole file
output_file.writelines([f"# @{GENERATED} DO NOT EDIT MANUALLY\n"])
try:
content = workflow_template.render(asdict(self))
except Exception as e:
print(f"Failed on template: {workflow_template}", file=sys.stderr)
raise e
output_file.write(content)
if content[-1] != "\n":
output_file.write("\n")
print(output_file_path)
class OperatingSystem:
LINUX = "linux"
WINDOWS = "windows"
MACOS = "macos"
MACOS_ARM64 = "macos-arm64"
LINUX_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(OperatingSystem.LINUX),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="conda",
build_configs=generate_binary_build_matrix.generate_conda_matrix(OperatingSystem.LINUX),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_CONDA},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="libtorch",
abi_version=generate_binary_build_matrix.CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.LINUX, generate_binary_build_matrix.CXX11_ABI
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="libtorch",
abi_version=generate_binary_build_matrix.PRE_CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.LINUX, generate_binary_build_matrix.PRE_CXX11_ABI
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
]
LINUX_BINARY_SMOKE_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX,
arches=["10.2"],
python_versions=["3.7"]),
branches="master",
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="libtorch",
abi_version=generate_binary_build_matrix.CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.LINUX, generate_binary_build_matrix.CXX11_ABI,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="master",
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="libtorch",
abi_version=generate_binary_build_matrix.PRE_CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
            OperatingSystem.LINUX, generate_binary_build_matrix.PRE_CXX11_ABI,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="master",
),
]
WINDOWS_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="wheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(OperatingSystem.WINDOWS),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="conda",
build_configs=generate_binary_build_matrix.generate_conda_matrix(OperatingSystem.WINDOWS),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_CONDA},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
abi_version=generate_binary_build_matrix.RELEASE,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS, generate_binary_build_matrix.RELEASE
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
abi_version=generate_binary_build_matrix.DEBUG,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS, generate_binary_build_matrix.DEBUG
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
]
WINDOWS_BINARY_SMOKE_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="wheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.WINDOWS,
arches=["11.3"],
python_versions=["3.7"]),
branches="master",
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
abi_version=generate_binary_build_matrix.RELEASE,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS, generate_binary_build_matrix.RELEASE,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="master",
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
abi_version=generate_binary_build_matrix.DEBUG,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS, generate_binary_build_matrix.DEBUG,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="master",
),
]
MACOS_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.MACOS,
package_type="wheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(OperatingSystem.MACOS),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.MACOS,
package_type="conda",
build_configs=generate_binary_build_matrix.generate_conda_matrix(OperatingSystem.MACOS),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_CONDA},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.MACOS,
package_type="libtorch",
abi_version=generate_binary_build_matrix.CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.MACOS, generate_binary_build_matrix.CXX11_ABI
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.MACOS,
package_type="libtorch",
abi_version=generate_binary_build_matrix.PRE_CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.MACOS, generate_binary_build_matrix.PRE_CXX11_ABI
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.MACOS_ARM64,
package_type="wheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(OperatingSystem.MACOS),
cross_compile_arm64=True,
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.MACOS_ARM64,
package_type="conda",
cross_compile_arm64=True,
build_configs=generate_binary_build_matrix.generate_conda_matrix(OperatingSystem.MACOS_ARM64),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_CONDA},
isolated_workflow=True,
),
),
]
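# Templates use "!{{" as the jinja2 variable delimiter so that the generated workflow files can contain
# GitHub Actions' own "${{ ... }}" expressions verbatim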
def main() -> None:
jinja_env = jinja2.Environment(
variable_start_string="!{{",
loader=jinja2.FileSystemLoader(str(GITHUB_DIR.joinpath("templates"))),
undefined=jinja2.StrictUndefined,
)
# not ported yet
template_and_workflows = [
(jinja_env.get_template("linux_binary_build_workflow.yml.j2"), LINUX_BINARY_BUILD_WORFKLOWS),
(jinja_env.get_template("linux_binary_build_workflow.yml.j2"), LINUX_BINARY_SMOKE_WORKFLOWS),
(jinja_env.get_template("windows_binary_build_workflow.yml.j2"), WINDOWS_BINARY_BUILD_WORKFLOWS),
(jinja_env.get_template("windows_binary_build_workflow.yml.j2"), WINDOWS_BINARY_SMOKE_WORKFLOWS),
(jinja_env.get_template("macos_binary_build_workflow.yml.j2"), MACOS_BINARY_BUILD_WORKFLOWS),
]
# Delete the existing generated files first, this should align with .gitattributes file description.
existing_workflows = GITHUB_DIR.glob("workflows/generated-*")
for w in existing_workflows:
try:
os.remove(w)
except Exception as e:
print(f"Error occurred when deleting file {w}: {e}")
for template, workflows in template_and_workflows:
# added Iterable check to appease the mypy gods
if not isinstance(workflows, Iterable):
raise Exception(f"How is workflows not iterable? {workflows}")
for workflow in workflows:
workflow.generate_workflow_file(workflow_template=template)
if __name__ == "__main__":
main()
| pytorch-master | .github/scripts/generate_ci_workflows.py |
import json
import subprocess
import sys
from enum import Enum
from pathlib import Path
from typing import NamedTuple, Optional
# From: https://docs.github.com/en/rest/reference/checks
class GitHubAnnotationLevel(str, Enum):
NOTICE = "notice"
WARNING = "warning"
FAILURE = "failure"
class GitHubAnnotation(NamedTuple):
path: str
start_line: int
end_line: int
start_column: Optional[int]
end_column: Optional[int]
annotation_level: GitHubAnnotationLevel
message: str
title: Optional[str]
raw_details: Optional[str]
PYTORCH_ROOT = Path(subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).decode('ascii').strip())
annotations = []
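# lintrunner emits one JSON lint message per line on stdin; convert each into a GitHub check-run annotation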
for line in sys.stdin:
lint_message = json.loads(line)
path = lint_message.get("path")
line = lint_message.get("line")
code = lint_message["code"]
severity = lint_message["severity"]
name = lint_message["name"]
description = lint_message.get("description")
# These fields are required by the GitHub API, but optional in lintrunner.
# If they don't exist, just skip.
if path is None or line is None:
print(f"No path/line for lint: ({code}) {name}", file=sys.stderr)
continue
# normalize path relative to git root
path = Path(path).relative_to(PYTORCH_ROOT)
annotations.append(GitHubAnnotation(
path=str(path),
start_line=int(line),
end_line=int(line),
start_column=None,
end_column=None,
annotation_level=GitHubAnnotationLevel.FAILURE,
message=description,
title=f"({code}) {name}",
raw_details=None,
)._asdict())
print(json.dumps(annotations), flush=True)
| pytorch-master | .github/scripts/convert_lintrunner_annotations_to_github.py |
#!/usr/bin/env python3
import argparse
import os
import subprocess
import re
from datetime import datetime
from distutils.util import strtobool
from pathlib import Path
LEADING_V_PATTERN = re.compile("^v")
TRAILING_RC_PATTERN = re.compile("-rc[0-9]*$")
LEGACY_BASE_VERSION_SUFFIX_PATTERN = re.compile("a0$")
class NoGitTagException(Exception):
pass
def get_pytorch_root() -> Path:
return Path(subprocess.check_output(
['git', 'rev-parse', '--show-toplevel']
).decode('ascii').strip())
def get_tag() -> str:
root = get_pytorch_root()
# We're on a tag
am_on_tag = (
subprocess.run(
['git', 'describe', '--tags', '--exact'],
cwd=root,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
).returncode == 0
)
tag = ""
if am_on_tag:
dirty_tag = subprocess.check_output(
['git', 'describe'],
cwd=root
).decode('ascii').strip()
# Strip leading v that we typically do when we tag branches
# ie: v1.7.1 -> 1.7.1
tag = re.sub(LEADING_V_PATTERN, "", dirty_tag)
# Strip trailing rc pattern
# ie: 1.7.1-rc1 -> 1.7.1
tag = re.sub(TRAILING_RC_PATTERN, "", tag)
return tag
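# The base version comes from version.txt (e.g. "1.13.0a0"); the legacy "a0" suffix is stripped below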
def get_base_version() -> str:
root = get_pytorch_root()
    with open(root / 'version.txt', 'r') as version_file:
        dirty_version = version_file.read().strip()
# Strips trailing a0 from version.txt, not too sure why it's there in the
# first place
return re.sub(LEGACY_BASE_VERSION_SUFFIX_PATTERN, "", dirty_version)
class PytorchVersion:
def __init__(
self,
gpu_arch_type: str,
gpu_arch_version: str,
no_build_suffix: bool,
) -> None:
self.gpu_arch_type = gpu_arch_type
self.gpu_arch_version = gpu_arch_version
self.no_build_suffix = no_build_suffix
def get_post_build_suffix(self) -> str:
if self.no_build_suffix:
return ""
if self.gpu_arch_type == "cuda":
return f"+cu{self.gpu_arch_version.replace('.', '')}"
return f"+{self.gpu_arch_type}{self.gpu_arch_version}"
def get_release_version(self) -> str:
if not get_tag():
raise NoGitTagException(
"Not on a git tag, are you sure you want a release version?"
)
return f"{get_tag()}{self.get_post_build_suffix()}"
def get_nightly_version(self) -> str:
date_str = datetime.today().strftime('%Y%m%d')
build_suffix = self.get_post_build_suffix()
return f"{get_base_version()}.dev{date_str}{build_suffix}"
def main() -> None:
parser = argparse.ArgumentParser(
description="Generate pytorch version for binary builds"
)
parser.add_argument(
"--no-build-suffix",
action="store_true",
help="Whether or not to add a build suffix typically (+cpu)",
default=strtobool(os.environ.get("NO_BUILD_SUFFIX", "False"))
)
parser.add_argument(
"--gpu-arch-type",
type=str,
help="GPU arch you are building for, typically (cpu, cuda, rocm)",
default=os.environ.get("GPU_ARCH_TYPE", "cpu")
)
parser.add_argument(
"--gpu-arch-version",
type=str,
help="GPU arch version, typically (10.2, 4.0), leave blank for CPU",
default=os.environ.get("GPU_ARCH_VERSION", "")
)
args = parser.parse_args()
version_obj = PytorchVersion(
args.gpu_arch_type,
args.gpu_arch_version,
args.no_build_suffix
)
try:
print(version_obj.get_release_version())
except NoGitTagException:
print(version_obj.get_nightly_version())
if __name__ == "__main__":
main()
| pytorch-master | .github/scripts/generate_pytorch_version.py |
#!/usr/bin/env python3
# Tests implemented in this file rely on GitHub GraphQL APIs.
# In order to avoid test flakiness, results of the queries are cached in gql_mocks.json.
# The PyTorch Lint workflow does not have GITHUB_TOKEN defined to avoid flakiness,
# so if you are making changes to merge_rules or GraphQL queries in trymerge.py,
# please make sure to delete `gql_mocks.json` and re-run the test locally with one's PAT
import json
import os
from hashlib import sha256
from trymerge import (find_matching_merge_rule,
get_land_checkrun_conclusions,
validate_land_time_checks,
gh_graphql,
gh_get_team_members,
read_merge_rules,
validate_revert,
GitHubPR,
MergeRule,
MandatoryChecksMissingError,
main as trymerge_main)
from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
from typing import Any, List, Optional
from unittest import TestCase, main, mock
from urllib.error import HTTPError
if 'GIT_REMOTE_URL' not in os.environ:
os.environ['GIT_REMOTE_URL'] = "https://github.com/pytorch/pytorch"
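# Cached GraphQL fetcher: responses are stored in gql_mocks.json keyed by the query's sha256 plus its
# sorted kwargs; on a cache miss the live API is queried and the result is persisted for future runs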
def mocked_gh_graphql(query: str, **kwargs: Any) -> Any:
gql_db_fname = os.path.join(os.path.dirname(__file__), "gql_mocks.json")
def get_mocked_queries() -> Any:
if not os.path.exists(gql_db_fname):
return {}
with open(gql_db_fname, encoding="utf-8") as f:
return json.load(f)
def save_mocked_queries(obj: Any) -> None:
with open(gql_db_fname, encoding="utf-8", mode="w") as f:
json.dump(obj, f, indent=2)
f.write("\n")
key = f"query_sha={sha256(query.encode('utf-8')).hexdigest()} " + " ".join([f"{k}={kwargs[k]}" for k in sorted(kwargs.keys())])
mocked_queries = get_mocked_queries()
if key in mocked_queries:
return mocked_queries[key]
try:
rc = gh_graphql(query, **kwargs)
except HTTPError as err:
if err.code == 401:
err_msg = "If you are seeing this message during workflow run, please make sure to update gql_mocks.json"
err_msg += f" locally, by deleting it and running {os.path.basename(__file__)} with "
err_msg += " GitHub Personal Access Token passed via GITHUB_TOKEN environment variable"
if os.getenv("GITHUB_TOKEN") is None:
err_msg = "Failed to update cached GraphQL queries as GITHUB_TOKEN is not defined." + err_msg
raise RuntimeError(err_msg) from err
mocked_queries[key] = rc
save_mocked_queries(mocked_queries)
return rc
def mock_parse_args(revert: bool = False,
force: bool = False) -> Any:
class Object(object):
def __init__(self) -> None:
self.revert = revert
self.force = force
self.pr_num = 76123
self.dry_run = True
self.comment_id = 0
self.on_mandatory = False
self.on_green = False
self.land_checks = False
self.reason = 'this is for testing'
return Object()
def mock_revert(repo: GitRepo, pr: GitHubPR, *,
dry_run: bool = False,
comment_id: Optional[int] = None,
reason: Optional[str] = None) -> None:
pass
def mock_merge(pr_num: int, repo: GitRepo,
dry_run: bool = False,
force: bool = False,
comment_id: Optional[int] = None,
mandatory_only: bool = False,
on_green: bool = False,
land_checks: bool = False,
timeout_minutes: int = 400,
stale_pr_days: int = 3) -> None:
pass
def mock_gh_get_info() -> Any:
return {"closed": False, "isCrossRepository": False}
def mocked_read_merge_rules_NE(repo: Any, org: str, project: str) -> List[MergeRule]:
return [
MergeRule(name="mock with nonexistent check",
patterns=["*"],
approved_by=[],
mandatory_checks_name=["Lint",
"Facebook CLA Check",
"nonexistent"],
),
]
def mocked_read_merge_rules(repo: Any, org: str, project: str) -> List[MergeRule]:
return [
MergeRule(name="super",
patterns=["*"],
approved_by=["pytorch/metamates"],
mandatory_checks_name=["Lint",
"Facebook CLA Check",
"pull / linux-xenial-cuda11.3-py3.7-gcc7 / build",
],
),
]
class DummyGitRepo(GitRepo):
def __init__(self) -> None:
super().__init__(get_git_repo_dir(), get_git_remote_name())
def commits_resolving_gh_pr(self, pr_num: int) -> List[str]:
return ["FakeCommitSha"]
def commit_message(self, ref: str) -> str:
return "super awsome commit message"
class TestGitHubPR(TestCase):
def test_merge_rules_valid(self) -> None:
"Test that merge_rules.json can be parsed"
repo = DummyGitRepo()
self.assertGreater(len(read_merge_rules(repo, "pytorch", "pytorch")), 1)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
@mock.patch('trymerge.read_merge_rules', side_effect=mocked_read_merge_rules)
def test_match_rules(self, mocked_gql: Any, mocked_rmr: Any) -> None:
"Tests that PR passes merge rules"
pr = GitHubPR("pytorch", "pytorch", 77700)
repo = DummyGitRepo()
self.assertTrue(find_matching_merge_rule(pr, repo) is not None)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
@mock.patch('trymerge.read_merge_rules', side_effect=mocked_read_merge_rules)
def test_lint_fails(self, mocked_gql: Any, mocked_rmr: Any) -> None:
"Tests that PR fails mandatory lint check"
pr = GitHubPR("pytorch", "pytorch", 74649)
repo = DummyGitRepo()
self.assertRaises(RuntimeError, lambda: find_matching_merge_rule(pr, repo))
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_last_comment(self, mocked_gql: Any) -> None:
"Tests that last comment can be fetched"
pr = GitHubPR("pytorch", "pytorch", 71759)
comment = pr.get_last_comment()
self.assertEqual(comment.author_login, "github-actions")
self.assertIsNone(comment.editor_login)
self.assertTrue("You've committed this PR" in comment.body_text)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_author_null(self, mocked_gql: Any) -> None:
""" Tests that PR author can be computed
If reply contains NULL
"""
pr = GitHubPR("pytorch", "pytorch", 71759)
author = pr.get_author()
self.assertTrue(author is not None)
self.assertTrue("@" in author)
self.assertTrue(pr.get_diff_revision() is None)
# PR with multiple contributors, but creator id is not among authors
pr = GitHubPR("pytorch", "pytorch", 75095)
self.assertEqual(pr.get_pr_creator_login(), "mruberry")
author = pr.get_author()
self.assertTrue(author is not None)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_large_diff(self, mocked_gql: Any) -> None:
"Tests that PR with 100+ files can be fetched"
pr = GitHubPR("pytorch", "pytorch", 73099)
self.assertTrue(pr.get_changed_files_count() > 100)
flist = pr.get_changed_files()
self.assertEqual(len(flist), pr.get_changed_files_count())
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_internal_changes(self, mocked_gql: Any) -> None:
"Tests that PR with internal changes is detected"
pr = GitHubPR("pytorch", "pytorch", 73969)
self.assertTrue(pr.has_internal_changes())
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_checksuites_pagination(self, mocked_gql: Any) -> None:
"Tests that PR with lots of checksuits can be fetched"
pr = GitHubPR("pytorch", "pytorch", 73811)
self.assertEqual(len(pr.get_checkrun_conclusions()), 104)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_comments_pagination(self, mocked_gql: Any) -> None:
"Tests that PR with 50+ comments can be fetched"
pr = GitHubPR("pytorch", "pytorch", 31093)
self.assertGreater(len(pr.get_comments()), 50)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_gql_complexity(self, mocked_gql: Any) -> None:
"Fetch comments and conclusions for PR with 60 commits"
# Previous version of GraphQL query used to cause HTTP/502 error
# see https://gist.github.com/malfet/9b93bc7eeddeaf1d84546efc4f0c577f
pr = GitHubPR("pytorch", "pytorch", 68111)
self.assertGreater(len(pr.get_comments()), 20)
self.assertGreater(len(pr.get_checkrun_conclusions()), 3)
self.assertGreater(pr.get_commit_count(), 60)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_team_members(self, mocked_gql: Any) -> None:
"Test fetching team members works"
dev_infra_team = gh_get_team_members("pytorch", "pytorch-dev-infra")
self.assertGreater(len(dev_infra_team), 2)
with self.assertWarns(Warning):
non_existing_team = gh_get_team_members("pytorch", "qwertyuiop")
self.assertEqual(len(non_existing_team), 0)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_author_many_commits(self, mocked_gql: Any) -> None:
""" Tests that authors for all commits can be fetched
"""
pr = GitHubPR("pytorch", "pytorch", 76118)
authors = pr.get_authors()
self.assertGreater(pr.get_commit_count(), 100)
self.assertGreater(len(authors), 50)
self.assertTrue("@" in pr.get_author())
@mock.patch('trymerge.read_merge_rules', side_effect=mocked_read_merge_rules_NE)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_pending_status_check(self, mocked_gql: Any, mocked_read_merge_rules: Any) -> None:
""" Tests that PR with nonexistent/pending status checks fails with the right reason.
"""
pr = GitHubPR("pytorch", "pytorch", 76118)
repo = DummyGitRepo()
self.assertRaisesRegex(MandatoryChecksMissingError,
".*are pending/not yet run.*",
lambda: find_matching_merge_rule(pr, repo))
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_author_many_reviews(self, mocked_gql: Any) -> None:
""" Tests that all reviews can be fetched
"""
pr = GitHubPR("pytorch", "pytorch", 76123)
approved_by = pr.get_approved_by()
self.assertGreater(len(approved_by), 0)
assert pr._reviews is not None # to pacify mypy
self.assertGreater(len(pr._reviews), 100)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_checkruns_many_runs(self, mocked_gql: Any) -> None:
""" Tests that all checkruns can be fetched
"""
pr = GitHubPR("pytorch", "pytorch", 77700)
conclusions = pr.get_checkrun_conclusions()
self.assertEqual(len(conclusions), 83)
self.assertTrue("pull / linux-docs / build-docs (cpp)" in conclusions.keys())
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_cancelled_gets_ignored(self, mocked_gql: Any) -> None:
""" Tests that cancelled workflow does not override existing successfull status
"""
pr = GitHubPR("pytorch", "pytorch", 82169)
conclusions = pr.get_checkrun_conclusions()
self.assertTrue("Lint" in conclusions.keys())
self.assertEqual(conclusions["Lint"][0], "SUCCESS")
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_many_land_checks(self, mocked_gql: Any) -> None:
""" Tests that all checkruns can be fetched for a commit
"""
conclusions = get_land_checkrun_conclusions('pytorch', 'pytorch', '6882717f73deffb692219ccd1fd6db258d8ed684')
self.assertEqual(len(conclusions), 101)
self.assertTrue("pull / linux-docs / build-docs (cpp)" in conclusions.keys())
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_failed_land_checks(self, mocked_gql: Any) -> None:
""" Tests that PR with Land Checks fail with a RunTime error
"""
self.assertRaisesRegex(RuntimeError,
".*Failed to merge; some land checks failed.*",
lambda: validate_land_time_checks('pytorch', 'pytorch', '6882717f73deffb692219ccd1fd6db258d8ed684'))
@mock.patch('trymerge.gh_get_pr_info', return_value=mock_gh_get_info())
@mock.patch('trymerge.parse_args', return_value=mock_parse_args(True, False))
@mock.patch('trymerge.try_revert', side_effect=mock_revert)
def test_main_revert(self, mock_revert: Any, mock_parse_args: Any, gh_get_pr_info: Any) -> None:
trymerge_main()
mock_revert.assert_called_once()
@mock.patch('trymerge.gh_get_pr_info', return_value=mock_gh_get_info())
@mock.patch('trymerge.parse_args', return_value=mock_parse_args(False, True))
@mock.patch('trymerge.merge', side_effect=mock_merge)
def test_main_force(self, mock_merge: Any, mock_parse_args: Any, mock_gh_get_info: Any) -> None:
trymerge_main()
mock_merge.assert_called_once_with(mock.ANY,
mock.ANY,
dry_run=mock.ANY,
force=True,
comment_id=mock.ANY,
on_green=False,
land_checks=False,
mandatory_only=False)
@mock.patch('trymerge.gh_get_pr_info', return_value=mock_gh_get_info())
@mock.patch('trymerge.parse_args', return_value=mock_parse_args(False, False))
@mock.patch('trymerge.merge', side_effect=mock_merge)
def test_main_merge(self, mock_merge: Any, mock_parse_args: Any, mock_gh_get_info: Any) -> None:
trymerge_main()
mock_merge.assert_called_once_with(mock.ANY,
mock.ANY,
dry_run=mock.ANY,
force=False,
comment_id=mock.ANY,
on_green=False,
land_checks=False,
mandatory_only=False)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_revert_rules(self, mock_gql: Any) -> None:
""" Tests that reverts from collaborators are allowed """
pr = GitHubPR("pytorch", "pytorch", 79694)
repo = DummyGitRepo()
self.assertIsNotNone(validate_revert(repo, pr, comment_id=1189459845))
if __name__ == "__main__":
main()
| pytorch-master | .github/scripts/test_trymerge.py |
#!/usr/bin/env python3
import argparse
import sys
import yaml
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
WORKFLOWS = REPO_ROOT / ".github" / "workflows"
EXPECTED_GROUP = "${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}" \
"-${{ github.event_name == 'workflow_dispatch' }}"
def should_check(filename: Path) -> bool:
with open(filename, "r") as f:
content = f.read()
data = yaml.safe_load(content)
on = data.get("on", data.get(True, {}))
return "pull_request" in on
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Ensure all relevant GitHub actions jobs will be cancelled based on a concurrency key"
)
args = parser.parse_args()
files = list(WORKFLOWS.glob("*.yml"))
errors_found = False
files = [f for f in files if should_check(f)]
names = set()
for filename in files:
with open(filename, "r") as f:
data = yaml.safe_load(f)
name = data.get("name")
if name is not None and name in names:
print("ERROR: duplicate workflow name:", name, file=sys.stderr)
errors_found = True
names.add(name)
expected = {
"group": EXPECTED_GROUP,
"cancel-in-progress": True,
}
actual = data.get("concurrency", None)
if actual != expected:
print(
f"'concurrency' incorrect or not found in '{filename.relative_to(REPO_ROOT)}'",
file=sys.stderr,
)
print(
f"expected: {expected}",
file=sys.stderr,
)
print(
f"actual: {actual}",
file=sys.stderr,
)
errors_found = True
if errors_found:
sys.exit(1)
| pytorch-master | .github/scripts/ensure_actions_will_cancel.py |
from unittest import TestCase, main, mock
from typing import Any, List, Dict
from fetch_latest_green_commit import isGreen, WorkflowCheck
workflowNames = [
"pull",
"trunk",
"Lint",
"linux-binary-libtorch-pre-cxx11",
"android-tests",
"windows-binary-wheel",
"periodic",
"docker-release-builds",
"nightly",
"pr-labels",
"Close stale pull requests",
"Update S3 HTML indices for download.pytorch.org",
"Create Release"
]
def set_workflow_job_status(workflow: List[Dict[str, Any]], name: str, status: str) -> List[Dict[str, Any]]:
for check in workflow:
if check['workflowName'] == name:
check['conclusion'] = status
return workflow
class TestChecks:
def make_test_checks(self) -> List[Dict[str, Any]]:
workflow_checks = []
for i in range(len(workflowNames)):
workflow_checks.append(WorkflowCheck(
workflowName=workflowNames[i],
name="test/job",
jobName="job",
conclusion="success",
)._asdict())
return workflow_checks
class TestPrintCommits(TestCase):
@mock.patch('fetch_latest_green_commit.get_commit_results', return_value=TestChecks().make_test_checks())
def test_all_successful(self, mock_get_commit_results: Any) -> None:
"Test with workflows are successful"
workflow_checks = mock_get_commit_results()
self.assertTrue(isGreen("sha", workflow_checks)[0])
@mock.patch('fetch_latest_green_commit.get_commit_results', return_value=TestChecks().make_test_checks())
def test_necessary_successful(self, mock_get_commit_results: Any) -> None:
"Test with necessary workflows are successful"
workflow_checks = mock_get_commit_results()
workflow_checks = set_workflow_job_status(workflow_checks, workflowNames[8], "failed")
workflow_checks = set_workflow_job_status(workflow_checks, workflowNames[9], "failed")
workflow_checks = set_workflow_job_status(workflow_checks, workflowNames[10], "failed")
workflow_checks = set_workflow_job_status(workflow_checks, workflowNames[11], "failed")
workflow_checks = set_workflow_job_status(workflow_checks, workflowNames[12], "failed")
self.assertTrue(isGreen("sha", workflow_checks)[0])
@mock.patch('fetch_latest_green_commit.get_commit_results', return_value=TestChecks().make_test_checks())
def test_necessary_skipped(self, mock_get_commit_results: Any) -> None:
"Test with necessary job (ex: pull) skipped"
workflow_checks = mock_get_commit_results()
workflow_checks = set_workflow_job_status(workflow_checks, "pull", "skipped")
result = isGreen("sha", workflow_checks)
self.assertTrue(result[0])
@mock.patch('fetch_latest_green_commit.get_commit_results', return_value=TestChecks().make_test_checks())
def test_skippable_skipped(self, mock_get_commit_results: Any) -> None:
"Test with skippable jobs (periodic and docker-release-builds skipped"
workflow_checks = mock_get_commit_results()
workflow_checks = set_workflow_job_status(workflow_checks, "periodic", "skipped")
workflow_checks = set_workflow_job_status(workflow_checks, "docker-release-builds", "skipped")
self.assertTrue(isGreen("sha", workflow_checks))
@mock.patch('fetch_latest_green_commit.get_commit_results', return_value=TestChecks().make_test_checks())
def test_necessary_failed(self, mock_get_commit_results: Any) -> None:
"Test with necessary job (ex: Lint) failed"
workflow_checks = mock_get_commit_results()
workflow_checks = set_workflow_job_status(workflow_checks, "Lint", "failed")
result = isGreen("sha", workflow_checks)
self.assertFalse(result[0])
self.assertEqual(result[1], "Lint checks were not successful")
@mock.patch('fetch_latest_green_commit.get_commit_results', return_value=TestChecks().make_test_checks())
def test_skippable_failed(self, mock_get_commit_results: Any) -> None:
"Test with skippable job (ex: docker-release-builds) failing"
workflow_checks = mock_get_commit_results()
workflow_checks = set_workflow_job_status(workflow_checks, "periodic", "skipped")
workflow_checks = set_workflow_job_status(workflow_checks, "docker-release-builds", "failed")
result = isGreen("sha", workflow_checks)
self.assertFalse(result[0])
self.assertEqual(result[1], "docker-release-builds checks were not successful")
@mock.patch('fetch_latest_green_commit.get_commit_results', return_value={})
def test_no_workflows(self, mock_get_commit_results: Any) -> None:
"Test with missing workflows"
workflow_checks = mock_get_commit_results()
result = isGreen("sha", workflow_checks)
self.assertFalse(result[0])
self.assertEqual(result[1], "missing required workflows: pull, trunk, lint, linux-binary, windows-binary")
if __name__ == "__main__":
main()
| pytorch-master | .github/scripts/test_fetch_latest_green_commit.py |
"""
Generate a torchbench test report from a file containing the PR body.
Currently, it only supports running tests on specified model names.
Testing environment:
- Intel Xeon 8259CL @ 2.50 GHz, 24 Cores with disabled Turbo and HT
- Nvidia Tesla T4
- Nvidia Driver 470.82.01
- Python 3.8
- CUDA 11.3
"""
# Known issues:
# 1. Does not reuse the build artifact in other CI workflows
# 2. CI jobs are serialized because there is only one worker
import os
import git # type: ignore[import]
import pathlib
import argparse
import subprocess
from typing import List, Tuple
TORCHBENCH_CONFIG_NAME = "config.yaml"
TORCHBENCH_USERBENCHMARK_CONFIG_NAME = "ub-config.yaml"
MAGIC_PREFIX = "RUN_TORCHBENCH:"
MAGIC_TORCHBENCH_PREFIX = "TORCHBENCH_BRANCH:"
ABTEST_CONFIG_TEMPLATE = """# This config is automatically generated by run_torchbench.py
start: {control}
end: {treatment}
threshold: 100
direction: decrease
timeout: 720
tests:"""
def gen_abtest_config(control: str, treatment: str, models: List[str]) -> str:
d = {}
d["control"] = control
d["treatment"] = treatment
config = ABTEST_CONFIG_TEMPLATE.format(**d)
if models == ["ALL"]:
return config + "\n"
for model in models:
config = f"{config}\n - {model}"
config = config + "\n"
return config
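# Illustrative sketch (model names and hashes below are hypothetical, not from a real run):
# gen_abtest_config("abc123", "def456", ["resnet50", "mobilenet_v2"]) fills ABTEST_CONFIG_TEMPLATE
# with start=abc123 and end=def456 and appends:
#   - resnet50
#   - mobilenet_v2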
def setup_gha_env(name: str, val: str) -> None:
fname = os.environ["GITHUB_ENV"]
content = f"{name}={val}\n"
with open(fname, "a") as fo:
fo.write(content)
def find_current_branch(repo_path: str) -> str:
repo = git.Repo(repo_path)
name: str = repo.active_branch.name
return name
def deploy_torchbench_config(output_dir: str, config: str, config_name: str = TORCHBENCH_CONFIG_NAME) -> None:
# Create test dir if needed
pathlib.Path(output_dir).mkdir(exist_ok=True)
# TorchBench config file name
config_path = os.path.join(output_dir, config_name)
with open(config_path, "w") as fp:
fp.write(config)
def get_valid_models(torchbench_path: str) -> List[str]:
benchmark_path = os.path.join(torchbench_path, "torchbenchmark", "models")
valid_models = [model for model in os.listdir(benchmark_path) if os.path.isdir(os.path.join(benchmark_path, model))]
return valid_models
def get_valid_userbenchmarks(torchbench_path: str) -> List[str]:
def is_valid_ub_dir(ub_path: str) -> bool:
return os.path.isdir(ub_path) and os.path.exists(os.path.join(ub_path, "__init__.py"))
ub_path = os.path.join(os.path.abspath(torchbench_path), "userbenchmark")
ubs = list(filter(is_valid_ub_dir, [os.path.join(ub_path, ubdir) for ubdir in os.listdir(ub_path)]))
valid_ubs = list(map(lambda x: os.path.basename(x), ubs))
return valid_ubs
def extract_models_from_pr(torchbench_path: str, prbody_file: str) -> Tuple[List[str], List[str]]:
model_list = []
userbenchmark_list = []
pr_list = []
with open(prbody_file, "r") as pf:
lines = map(lambda x: x.strip(), pf.read().splitlines())
magic_lines = list(filter(lambda x: x.startswith(MAGIC_PREFIX), lines))
if magic_lines:
# Only the first magic line will be recognized.
pr_list = list(map(lambda x: x.strip(), magic_lines[0][len(MAGIC_PREFIX):].split(",")))
valid_models = get_valid_models(torchbench_path)
valid_ubs = get_valid_userbenchmarks(torchbench_path)
for pr_bm in pr_list:
if pr_bm in valid_models or pr_bm == "ALL":
model_list.append(pr_bm)
elif pr_bm in valid_ubs:
userbenchmark_list.append(pr_bm)
else:
print(f"The model or benchmark {pr_bm} you specified does not exist in TorchBench suite. Please double check.")
exit(-1)
# Shortcut: if pr_list is ["ALL"], run all the model tests
if "ALL" in model_list:
model_list = ["ALL"]
return model_list, userbenchmark_list
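# For illustration (hypothetical PR body): a body containing the line
#   RUN_TORCHBENCH: resnet50, nvfuser
# would place "resnet50" in model_list if it is a valid model directory and "nvfuser" in
# userbenchmark_list if it is a valid userbenchmark; "ALL" collapses model_list to ["ALL"].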
def find_torchbench_branch(prbody_file: str) -> str:
branch_name: str = ""
with open(prbody_file, "r") as pf:
lines = map(lambda x: x.strip(), pf.read().splitlines())
magic_lines = list(filter(lambda x: x.startswith(MAGIC_TORCHBENCH_PREFIX), lines))
if magic_lines:
# Only the first magic line will be recognized.
branch_name = magic_lines[0][len(MAGIC_TORCHBENCH_PREFIX):].strip()
# If not specified, use main as the default branch
if not branch_name:
branch_name = "main"
return branch_name
def run_torchbench(pytorch_path: str, torchbench_path: str, output_dir: str) -> None:
# Copy system environment so that we will not override
env = dict(os.environ)
command = ["python", "bisection.py", "--work-dir", output_dir,
"--pytorch-src", pytorch_path, "--torchbench-src", torchbench_path,
"--config", os.path.join(output_dir, TORCHBENCH_CONFIG_NAME),
"--output", os.path.join(output_dir, "result.txt")]
subprocess.check_call(command, cwd=torchbench_path, env=env)
def run_userbenchmarks(pytorch_path: str, torchbench_path: str, base_sha: str, head_sha: str,
userbenchmark: str, output_dir: str) -> None:
# Copy system environment so that we will not override
env = dict(os.environ)
command = ["python", "./.github/scripts/abtest.py",
"--pytorch-repo", pytorch_path,
"--base", base_sha,
"--head", head_sha,
"--userbenchmark", userbenchmark,
"--output-dir", output_dir]
subprocess.check_call(command, cwd=torchbench_path, env=env)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run TorchBench tests based on PR')
parser.add_argument('--pr-body', required=True, help="The file that contains body of a Pull Request")
subparsers = parser.add_subparsers(dest='command')
# parser for setup the torchbench branch name env
branch_parser = subparsers.add_parser("set-torchbench-branch")
# parser to run the torchbench branch
run_parser = subparsers.add_parser("run")
run_parser.add_argument('--pr-num', required=True, type=str, help="The Pull Request number")
run_parser.add_argument('--pr-base-sha', required=True, type=str, help="The Pull Request base hash")
run_parser.add_argument('--pr-head-sha', required=True, type=str, help="The Pull Request head hash")
run_parser.add_argument('--pytorch-path', required=True, type=str, help="Path to pytorch repository")
run_parser.add_argument('--torchbench-path', required=True, type=str, help="Path to TorchBench repository")
args = parser.parse_args()
if args.command == 'set-torchbench-branch':
branch_name = find_torchbench_branch(args.pr_body)
# env name: "TORCHBENCH_BRANCH"
setup_gha_env(MAGIC_TORCHBENCH_PREFIX[:-1], branch_name)
elif args.command == 'run':
output_dir: str = os.path.join(os.environ["HOME"], ".torchbench", "bisection", f"pr{args.pr_num}")
# Assert the current branch in args.torchbench_path is the same as the one specified in pr body
branch_name = find_torchbench_branch(args.pr_body)
current_branch = find_current_branch(args.torchbench_path)
assert branch_name == current_branch, f"Torchbench repo {args.torchbench_path} is on branch {current_branch}, \
but user specified to run on branch {branch_name}."
print(f"Ready to run TorchBench with benchmark. Result will be saved in the directory: {output_dir}.")
# Identify the specified models and userbenchmarks
models, userbenchmarks = extract_models_from_pr(args.torchbench_path, args.pr_body)
if models:
torchbench_config = gen_abtest_config(args.pr_base_sha, args.pr_head_sha, models)
deploy_torchbench_config(output_dir, torchbench_config)
run_torchbench(pytorch_path=args.pytorch_path, torchbench_path=args.torchbench_path, output_dir=output_dir)
if userbenchmarks:
assert len(userbenchmarks) == 1, \
"We don't support running multiple userbenchmarks in single workflow yet." \
"If you need, please submit a feature request."
run_userbenchmarks(pytorch_path=args.pytorch_path, torchbench_path=args.torchbench_path,
base_sha=args.pr_base_sha, head_sha=args.pr_head_sha,
userbenchmark=userbenchmarks[0], output_dir=output_dir)
if not models and not userbenchmarks:
print("Can't parse valid models or userbenchmarks from the pr body. Quit.")
exit(-1)
else:
print(f"The command {args.command} is not supported.")
exit(-1)
| pytorch-master | .github/scripts/run_torchbench.py |
#!/usr/bin/env python3
import os
import re
import tempfile
from collections import defaultdict
from datetime import datetime
from typing import cast, Any, Dict, Iterator, List, Optional, Tuple, Union
RE_GITHUB_URL_MATCH = re.compile("^https://.*@?github.com/(.+)/(.+)$")
def get_git_remote_name() -> str:
return os.getenv("GIT_REMOTE_NAME", "origin")
def get_git_repo_dir() -> str:
from pathlib import Path
return os.getenv("GIT_REPO_DIR", str(Path(__file__).resolve().parent.parent.parent))
def fuzzy_list_to_dict(items: List[Tuple[str, str]]) -> Dict[str, List[str]]:
"""
Converts list to dict preserving elements with duplicate keys
"""
rc: Dict[str, List[str]] = defaultdict(lambda: [])
for (key, val) in items:
rc[key].append(val)
return dict(rc)
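# Example with illustrative values:
#   fuzzy_list_to_dict([("a", "1"), ("a", "2"), ("b", "3")]) == {"a": ["1", "2"], "b": ["3"]}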
def _check_output(items: List[str], encoding: str = "utf-8") -> str:
from subprocess import check_output, CalledProcessError, STDOUT
try:
return check_output(items, stderr=STDOUT).decode(encoding)
except CalledProcessError as e:
msg = f"Command `{' '.join(e.cmd)}` returned non-zero exit code {e.returncode}"
stdout = e.stdout.decode(encoding) if e.stdout is not None else ""
stderr = e.stderr.decode(encoding) if e.stderr is not None else ""
if len(stderr) == 0:
msg += f"\n```\n{stdout}```"
else:
msg += f"\nstdout:\n```\n{stdout}```\nstderr:\n```\n{stderr}```"
raise RuntimeError(msg) from e
class GitCommit:
commit_hash: str
title: str
body: str
author: str
author_date: datetime
commit_date: Optional[datetime]
def __init__(self,
commit_hash: str,
author: str,
author_date: datetime,
title: str,
body: str,
commit_date: Optional[datetime] = None) -> None:
self.commit_hash = commit_hash
self.author = author
self.author_date = author_date
self.commit_date = commit_date
self.title = title
self.body = body
def __repr__(self) -> str:
return f"{self.title} ({self.commit_hash})"
def __contains__(self, item: Any) -> bool:
return item in self.body or item in self.title
def parse_fuller_format(lines: Union[str, List[str]]) -> GitCommit:
"""
Expect commit message generated using `--format=fuller --date=unix` format, i.e.:
commit <sha1>
Author: <author>
AuthorDate: <author date>
Commit: <committer>
CommitDate: <committer date>
<title line>
<full commit message>
"""
if isinstance(lines, str):
lines = lines.split("\n")
# TODO: Handle merge commits correctly
if len(lines) > 1 and lines[1].startswith("Merge:"):
del lines[1]
assert len(lines) > 7
assert lines[0].startswith("commit")
assert lines[1].startswith("Author: ")
assert lines[2].startswith("AuthorDate: ")
assert lines[3].startswith("Commit: ")
assert lines[4].startswith("CommitDate: ")
assert len(lines[5]) == 0
return GitCommit(commit_hash=lines[0].split()[1].strip(),
author=lines[1].split(":", 1)[1].strip(),
author_date=datetime.fromtimestamp(int(lines[2].split(":", 1)[1].strip())),
commit_date=datetime.fromtimestamp(int(lines[4].split(":", 1)[1].strip())),
title=lines[6].strip(),
body="\n".join(lines[7:]),
)
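# Illustrative input (hash, author and dates below are made up), matching the format described above:
#   commit 0123456789abcdef0123456789abcdef01234567
#   Author:     Jane Doe <jane@example.com>
#   AuthorDate: 1650000000
#   Commit:     Jane Doe <jane@example.com>
#   CommitDate: 1650000123
#
#   Fix a bug
#   Longer description of the change.
# parse_fuller_format would return a GitCommit with title "Fix a bug" and dates parsed from the
# unix timestamps.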
class GitRepo:
def __init__(self, path: str, remote: str = "origin", debug: bool = False) -> None:
self.repo_dir = path
self.remote = remote
self.debug = debug
def _run_git(self, *args: Any) -> str:
if self.debug:
print(f"+ git -C {self.repo_dir} {' '.join(args)}")
return _check_output(["git", "-C", self.repo_dir] + list(args))
def revlist(self, revision_range: str) -> List[str]:
rc = self._run_git("rev-list", revision_range, "--", ".").strip()
return rc.split("\n") if len(rc) > 0 else []
def current_branch(self) -> str:
return self._run_git("symbolic-ref", "--short", "HEAD").strip()
def checkout(self, branch: str) -> None:
self._run_git("checkout", branch)
def fetch(self, ref: Optional[str] = None, branch: Optional[str] = None) -> None:
if branch is None and ref is None:
self._run_git("fetch", self.remote)
elif branch is None:
self._run_git("fetch", self.remote, ref)
else:
self._run_git("fetch", self.remote, f"{ref}:{branch}")
def show_ref(self, name: str) -> str:
refs = self._run_git('show-ref', '-s', name).strip().split('\n')
if not all(refs[i] == refs[0] for i in range(1, len(refs))):
raise RuntimeError(f"referce {name} is ambigous")
return refs[0]
def rev_parse(self, name: str) -> str:
return self._run_git('rev-parse', '--verify', name).strip()
def get_merge_base(self, from_ref: str, to_ref: str) -> str:
return self._run_git('merge-base', from_ref, to_ref).strip()
def patch_id(self, ref: Union[str, List[str]]) -> List[Tuple[str, str]]:
is_list = isinstance(ref, list)
if is_list:
if len(ref) == 0:
return []
ref = " ".join(ref)
rc = _check_output(['sh', '-c', f'git -C {self.repo_dir} show {ref}|git patch-id --stable']).strip()
return [cast(Tuple[str, str], x.split(" ", 1)) for x in rc.split("\n")]
def commits_resolving_gh_pr(self, pr_num: int) -> List[str]:
owner, name = self.gh_owner_and_name()
msg = f"Pull Request resolved: https://github.com/{owner}/{name}/pull/{pr_num}"
rc = self._run_git('log', '--format=%H', '--grep', msg).strip()
return rc.split("\n") if len(rc) > 0 else []
def get_commit(self, ref: str) -> GitCommit:
return parse_fuller_format(self._run_git('show', '--format=fuller', '--date=unix', '--shortstat', ref))
def cherry_pick(self, ref: str) -> None:
self._run_git('cherry-pick', '-x', ref)
def revert(self, ref: str) -> None:
self._run_git("revert", "--no-edit", ref)
def compute_branch_diffs(self, from_branch: str, to_branch: str) -> Tuple[List[str], List[str]]:
"""
Returns lists of commits that are missing in each other's branch since their merge base
Might be slow if the merge base between the two branches is pretty far off
"""
from_ref = self.rev_parse(from_branch)
to_ref = self.rev_parse(to_branch)
merge_base = self.get_merge_base(from_ref, to_ref)
from_commits = self.revlist(f'{merge_base}..{from_ref}')
to_commits = self.revlist(f'{merge_base}..{to_ref}')
from_ids = fuzzy_list_to_dict(self.patch_id(from_commits))
to_ids = fuzzy_list_to_dict(self.patch_id(to_commits))
for patch_id in set(from_ids).intersection(set(to_ids)):
from_values = from_ids[patch_id]
to_values = to_ids[patch_id]
if len(from_values) != len(to_values):
# Eliminate duplicate commits+reverts from the list
while len(from_values) > 0 and len(to_values) > 0:
frc = self.get_commit(from_values.pop())
toc = self.get_commit(to_values.pop())
# FRC branch might have PR number added to the title
if frc.title != toc.title or frc.author_date != toc.author_date:
# HACK: Same commit was merged, reverted and landed again
# which creates a tracking problem
if (
"pytorch/pytorch" not in self.remote_url() or
frc.commit_hash not in {"0a6a1b27a464ba5be5f587cce2ee12ab8c504dbf",
"6d0f4a1d545a8f161df459e8d4ccafd4b9017dbe",
"edf909e58f06150f7be41da2f98a3b9de3167bca",
"a58c6aea5a0c9f8759a4154e46f544c8b03b8db1",
"7106d216c29ca16a3504aa2bedad948ebcf4abc2"}
):
raise RuntimeError(f"Unexpected differences between {frc} and {toc}")
from_commits.remove(frc.commit_hash)
to_commits.remove(toc.commit_hash)
continue
for commit in from_values:
from_commits.remove(commit)
for commit in to_values:
to_commits.remove(commit)
# Another HACK: Patch-id is not stable for commits with binary files or for big changes across commits
# I.e. cherry-picking those from one branch into another will change patchid
if "pytorch/pytorch" in self.remote_url():
for excluded_commit in {"8e09e20c1dafcdbdb45c2d1574da68a32e54a3a5",
"5f37e5c2a39c3acb776756a17730b865f0953432",
"b5222584e6d6990c6585981a936defd1af14c0ba",
"84d9a2e42d5ed30ec3b8b4140c38dd83abbce88d",
"f211ec90a6cdc8a2a5795478b5b5c8d7d7896f7e"}:
if excluded_commit in from_commits:
from_commits.remove(excluded_commit)
return (from_commits, to_commits)
def cherry_pick_commits(self, from_branch: str, to_branch: str) -> None:
orig_branch = self.current_branch()
self.checkout(to_branch)
from_commits, to_commits = self.compute_branch_diffs(from_branch, to_branch)
if len(from_commits) == 0:
print("Nothing to do")
self.checkout(orig_branch)
return
for commit in reversed(from_commits):
print(f"Cherry picking commit {commit}")
self.cherry_pick(commit)
self.checkout(orig_branch)
def push(self, branch: str, dry_run: bool, retry: int = 3) -> None:
for cnt in range(retry):
try:
if dry_run:
self._run_git("push", "--dry-run", self.remote, branch)
else:
self._run_git("push", self.remote, branch)
except RuntimeError as e:
print(f"{cnt} push attempt failed with {e}")
self.fetch()
self._run_git("rebase", f"{self.remote}/{branch}")
def head_hash(self) -> str:
return self._run_git("show-ref", "--hash", "HEAD").strip()
def remote_url(self) -> str:
return self._run_git("remote", "get-url", self.remote)
def gh_owner_and_name(self) -> Tuple[str, str]:
url = os.getenv("GIT_REMOTE_URL", None)
if url is None:
url = self.remote_url()
rc = RE_GITHUB_URL_MATCH.match(url)
if rc is None:
raise RuntimeError(f"Unexpected url format {url}")
return cast(Tuple[str, str], rc.groups())
def commit_message(self, ref: str) -> str:
return self._run_git("log", "-1", "--format=%B", ref)
def amend_commit_message(self, msg: str) -> None:
self._run_git("commit", "--amend", "-m", msg)
def clone_repo(username: str, password: str, org: str, project: str) -> GitRepo:
path = tempfile.mkdtemp()
_check_output(['git', 'clone', f'https://{username}:{password}@github.com/{org}/{project}', path]).strip()
return GitRepo(path=path)
class PeekableIterator(Iterator[str]):
def __init__(self, val: str) -> None:
self._val = val
self._idx = -1
def peek(self) -> Optional[str]:
if self._idx + 1 >= len(self._val):
return None
return self._val[self._idx + 1]
def __iter__(self) -> "PeekableIterator":
return self
def __next__(self) -> str:
rc = self.peek()
if rc is None:
raise StopIteration
self._idx += 1
return rc
def patterns_to_regex(allowed_patterns: List[str]) -> Any:
"""
pattern is glob-like, i.e. the only special sequences it has are:
- ? - matches a single character
- * - matches any non-folder separator characters or no character
- ** - matches any characters or no character
Assuming that patterns are free of braces and backslashes,
the only characters that need to be escaped are dot and plus
"""
rc = "("
for idx, pattern in enumerate(allowed_patterns):
if idx > 0:
rc += "|"
pattern_ = PeekableIterator(pattern)
assert not any(c in pattern for c in "{}()[]\\")
for c in pattern_:
if c == ".":
rc += "\\."
elif c == "+":
rc += "\\+"
elif c == "*":
if pattern_.peek() == "*":
next(pattern_)
rc += ".*"
else:
rc += "[^/]*"
else:
rc += c
rc += ")"
return re.compile(rc)
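# Example (illustrative patterns): patterns_to_regex(["aten/**/*.cpp"]).match(
# "aten/src/ATen/native/LinearAlgebra.cpp") succeeds, while the single-star pattern
# ["aten/*.cpp"] does not match that path because "*" never crosses a "/" separator.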
| pytorch-master | .github/scripts/gitutils.py |
#!/usr/bin/env python3
"""Generates a matrix to be utilized through github actions
Will output a condensed version of the matrix if on a pull request that only
includes the latest version of python we support built on three different
architectures:
* CPU
* Latest CUDA
* Latest ROCM
"""
from typing import Dict, List, Tuple, Optional
CUDA_ARCHES = ["10.2", "11.3", "11.6", "11.7"]
ROCM_ARCHES = ["5.1.1", "5.2"]
def arch_type(arch_version: str) -> str:
if arch_version in CUDA_ARCHES:
return "cuda"
elif arch_version in ROCM_ARCHES:
return "rocm"
else: # arch_version should always be "cpu" in this case
return "cpu"
WHEEL_CONTAINER_IMAGES = {
**{
gpu_arch: f"pytorch/manylinux-builder:cuda{gpu_arch}"
for gpu_arch in CUDA_ARCHES
},
**{
gpu_arch: f"pytorch/manylinux-builder:rocm{gpu_arch}"
for gpu_arch in ROCM_ARCHES
},
"cpu": "pytorch/manylinux-builder:cpu",
}
CONDA_CONTAINER_IMAGES = {
**{gpu_arch: f"pytorch/conda-builder:cuda{gpu_arch}" for gpu_arch in CUDA_ARCHES},
"cpu": "pytorch/conda-builder:cpu",
}
PRE_CXX11_ABI = "pre-cxx11"
CXX11_ABI = "cxx11-abi"
RELEASE = "release"
DEBUG = "debug"
LIBTORCH_CONTAINER_IMAGES: Dict[Tuple[str, str], str] = {
**{
(gpu_arch, PRE_CXX11_ABI): f"pytorch/manylinux-builder:cuda{gpu_arch}"
for gpu_arch in CUDA_ARCHES
},
**{
(gpu_arch, CXX11_ABI): f"pytorch/libtorch-cxx11-builder:cuda{gpu_arch}"
for gpu_arch in CUDA_ARCHES
},
**{
(gpu_arch, PRE_CXX11_ABI): f"pytorch/manylinux-builder:rocm{gpu_arch}"
for gpu_arch in ROCM_ARCHES
},
**{
(gpu_arch, CXX11_ABI): f"pytorch/libtorch-cxx11-builder:rocm{gpu_arch}"
for gpu_arch in ROCM_ARCHES
},
("cpu", PRE_CXX11_ABI): "pytorch/manylinux-builder:cpu",
("cpu", CXX11_ABI): "pytorch/libtorch-cxx11-builder:cpu",
}
FULL_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"]
def translate_desired_cuda(gpu_arch_type: str, gpu_arch_version: str) -> str:
return {
"cpu": "cpu",
"cuda": f"cu{gpu_arch_version.replace('.', '')}",
"rocm": f"rocm{gpu_arch_version}",
}.get(gpu_arch_type, gpu_arch_version)
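# Examples: translate_desired_cuda("cuda", "11.6") -> "cu116",
# translate_desired_cuda("rocm", "5.2") -> "rocm5.2",
# translate_desired_cuda("cpu", "") -> "cpu".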
def list_without(in_list: List[str], without: List[str]) -> List[str]:
return [item for item in in_list if item not in without]
def generate_conda_matrix(os: str) -> List[Dict[str, str]]:
ret: List[Dict[str, str]] = []
arches = ["cpu"]
python_versions = FULL_PYTHON_VERSIONS
if os == "linux":
arches += CUDA_ARCHES
elif os == "windows":
# We don't build CUDA 10.2 for Windows, see https://github.com/pytorch/pytorch/issues/65648
arches += list_without(CUDA_ARCHES, ["10.2"])
elif os == "macos-arm64":
python_versions = list_without(python_versions, ["3.7"])
for python_version in python_versions:
# We don't currently build conda packages for rocm
for arch_version in arches:
gpu_arch_type = arch_type(arch_version)
gpu_arch_version = "" if arch_version == "cpu" else arch_version
ret.append(
{
"python_version": python_version,
"gpu_arch_type": gpu_arch_type,
"gpu_arch_version": gpu_arch_version,
"desired_cuda": translate_desired_cuda(
gpu_arch_type, gpu_arch_version
),
"container_image": CONDA_CONTAINER_IMAGES[arch_version],
"package_type": "conda",
"build_name": f"conda-py{python_version}-{gpu_arch_type}{gpu_arch_version}".replace(
".", "_"
),
}
)
return ret
def generate_libtorch_matrix(os: str, abi_version: str,
arches: Optional[List[str]] = None,
libtorch_variants: Optional[List[str]] = None) -> List[Dict[str, str]]:
if arches is None:
arches = ["cpu"]
if os == "linux":
arches += CUDA_ARCHES
arches += ROCM_ARCHES
elif os == "windows":
# We don't build CUDA 10.2 for Windows, see https://github.com/pytorch/pytorch/issues/65648
arches += list_without(CUDA_ARCHES, ["10.2"])
if libtorch_variants is None:
libtorch_variants = [
"shared-with-deps",
"shared-without-deps",
"static-with-deps",
"static-without-deps",
]
ret: List[Dict[str, str]] = []
for arch_version in arches:
for libtorch_variant in libtorch_variants:
# one of the values in the following list must be exactly
# CXX11_ABI, but the precise value of the other one doesn't
# matter
gpu_arch_type = arch_type(arch_version)
gpu_arch_version = "" if arch_version == "cpu" else arch_version
# ROCm builds without-deps failed even in ROCm runners; skip for now
if gpu_arch_type == "rocm" and "without-deps" in libtorch_variant:
continue
ret.append(
{
"gpu_arch_type": gpu_arch_type,
"gpu_arch_version": gpu_arch_version,
"desired_cuda": translate_desired_cuda(
gpu_arch_type, gpu_arch_version
),
"libtorch_variant": libtorch_variant,
"libtorch_config": abi_version if os == "windows" else "",
"devtoolset": abi_version if os != "windows" else "",
"container_image": LIBTORCH_CONTAINER_IMAGES[
(arch_version, abi_version)
] if os != "windows" else "",
"package_type": "libtorch",
"build_name": f"libtorch-{gpu_arch_type}{gpu_arch_version}-{libtorch_variant}-{abi_version}".replace(
".", "_"
),
}
)
return ret
def generate_wheels_matrix(os: str,
arches: Optional[List[str]] = None,
python_versions: Optional[List[str]] = None) -> List[Dict[str, str]]:
package_type = "wheel"
if os == "linux":
# NOTE: We only build manywheel packages for linux
package_type = "manywheel"
if python_versions is None:
# Define default python version
python_versions = list(FULL_PYTHON_VERSIONS)
if os == "macos-arm64":
python_versions = list_without(python_versions, ["3.7"])
if os == "linux":
# NOTE: We only build 3.11 wheel on linux as 3.11 is not
# available on conda right now
python_versions.append("3.11")
if arches is None:
# Define default compute architectures
arches = ["cpu"]
if os == "linux":
arches += CUDA_ARCHES + ROCM_ARCHES
elif os == "windows":
# We don't build CUDA 10.2 for Windows, see https://github.com/pytorch/pytorch/issues/65648
arches += list_without(CUDA_ARCHES, ["10.2"])
ret: List[Dict[str, str]] = []
for python_version in python_versions:
for arch_version in arches:
gpu_arch_type = arch_type(arch_version)
gpu_arch_version = "" if arch_version == "cpu" else arch_version
# Skip rocm 3.11 binaries for now as the docker images are not correct
if python_version == "3.11" and gpu_arch_type == "rocm":
continue
ret.append(
{
"python_version": python_version,
"gpu_arch_type": gpu_arch_type,
"gpu_arch_version": gpu_arch_version,
"desired_cuda": translate_desired_cuda(
gpu_arch_type, gpu_arch_version
),
"container_image": WHEEL_CONTAINER_IMAGES[arch_version],
"package_type": package_type,
"build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}".replace(
".", "_"
),
}
)
return ret
| pytorch-master | .github/scripts/generate_binary_build_matrix.py |
#!/usr/bin/env python3
import os
import subprocess
import sys
import re
from typing import Any
from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
from trymerge import gh_post_pr_comment as gh_post_comment, GitHubPR
def parse_args() -> Any:
from argparse import ArgumentParser
parser = ArgumentParser("Rebase PR into branch")
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--branch", type=str)
parser.add_argument("pr_num", type=int)
return parser.parse_args()
def rebase_onto(pr: GitHubPR, repo: GitRepo, onto_branch: str, dry_run: bool = False) -> None:
branch = f"pull/{pr.pr_num}/head"
onto_branch = f"refs/remotes/origin/{onto_branch}"
remote_url = f"https://github.com/{pr.info['headRepository']['nameWithOwner']}.git"
refspec = f"{branch}:{pr.head_ref()}"
repo.fetch(branch, branch)
repo._run_git("rebase", onto_branch, branch)
if dry_run:
push_result = repo._run_git("push", "--dry-run", "-f", remote_url, refspec)
else:
push_result = repo._run_git("push", "-f", remote_url, refspec)
if "Everything up-to-date" in push_result:
gh_post_comment(pr.org, pr.project, pr.pr_num,
f"Tried to rebase and push PR #{pr.pr_num}, but it was already up to date", dry_run=dry_run)
else:
gh_post_comment(pr.org, pr.project, pr.pr_num,
f"Successfully rebased `{pr.head_ref()}` onto `{onto_branch}`, please pull locally " +
f"before adding more changes (for example, via `git checkout {pr.head_ref()} && " +
"git pull --rebase`)", dry_run=dry_run)
def rebase_ghstack_onto(pr: GitHubPR, repo: GitRepo, onto_branch: str, dry_run: bool = False) -> None:
if subprocess.run([sys.executable, "-m", "ghstack", "--help"], capture_output=True).returncode != 0:
subprocess.run([sys.executable, "-m", "pip", "install", "ghstack"])
orig_ref = f"{re.sub(r'/head$', '/orig', pr.head_ref())}"
onto_branch = f"refs/remotes/origin/{onto_branch}"
repo.fetch(orig_ref, orig_ref)
repo._run_git("rebase", onto_branch, orig_ref)
# steal the identity of the committer of the commit on the orig branch
email = repo._run_git("log", orig_ref, "--pretty=format:%ae", "-1")
name = repo._run_git("log", orig_ref, "--pretty=format:%an", "-1")
repo._run_git("config", "--global", "user.name", name)
repo._run_git("config", "--global", "user.email", email)
os.environ["OAUTH_TOKEN"] = os.environ["GITHUB_TOKEN"]
with open('.ghstackrc', 'w+') as f:
f.write('[ghstack]\n' +
"github_url=github.com\n" +
"github_username=pytorchmergebot\n" +
"remote_name=origin")
if dry_run:
print("Don't know how to dry-run ghstack")
else:
ghstack_result = subprocess.run(["ghstack"], capture_output=True)
push_result = ghstack_result.stdout.decode("utf-8")
print(push_result)
if ghstack_result.returncode != 0:
raise Exception(f"\n```{push_result}```")
# The contents of a successful push result should look like:
# Summary of changes (ghstack 0.6.0)
# - Updated https://github.com/clee2000/random-testing/pull/2
# - Updated https://github.com/clee2000/random-testing/pull/1
# Facebook employees can import your changes by running
# (on a Facebook machine):
# ghimport -s https://github.com/clee2000/random-testing/pull/2
# If you want to work on this diff stack on another machine:
# ghstack checkout https://github.com/clee2000/random-testing/pull/2
org, project = repo.gh_owner_and_name()
for line in push_result.splitlines():
if "Updated" in line:
pr_num = int(line.split("/")[-1])
if pr_num != pr.pr_num:
gh_post_comment(pr.org, pr.project, pr_num,
f"Rebased `{orig_ref}` onto `{onto_branch}` because #{pr.pr_num} was rebased, "
"please pull locally before adding more changes (for example, via `ghstack " +
f"checkout https://github.com/{org}/{project}/pull/{pr_num}`)", dry_run=dry_run)
else:
gh_post_comment(pr.org, pr.project, pr_num,
f"Successfully rebased `{orig_ref}` onto `{onto_branch}`, please pull locally " +
"before adding more changes (for example, via `ghstack " +
f"checkout https://github.com/{org}/{project}/pull/{pr.pr_num}`)", dry_run=dry_run)
if f"Skipped https://github.com/{org}/{project}/pull/{pr.pr_num}" in push_result:
gh_post_comment(pr.org, pr.project, pr.pr_num,
f"Tried to rebase and push PR #{pr.pr_num}, but it was already up to date", dry_run=dry_run)
def main() -> None:
args = parse_args()
repo = GitRepo(get_git_repo_dir(), get_git_remote_name(), debug=True)
org, project = repo.gh_owner_and_name()
pr = GitHubPR(org, project, args.pr_num)
onto_branch = args.branch if args.branch else pr.default_branch()
msg = "@pytorchbot successfully started a rebase job."
msg += f" Check the current status [here]({os.getenv('GH_RUN_URL')})"
gh_post_comment(org, project, args.pr_num, msg, dry_run=args.dry_run)
if pr.is_closed():
gh_post_comment(org, project, args.pr_num, f"PR #{args.pr_num} is closed, won't rebase", dry_run=args.dry_run)
return
try:
if pr.is_ghstack_pr():
rebase_ghstack_onto(pr, repo, onto_branch, dry_run=args.dry_run)
return
rebase_onto(pr, repo, onto_branch, dry_run=args.dry_run)
except Exception as e:
msg = f"Rebase failed due to {e}"
run_url = os.getenv("GH_RUN_URL")
if run_url is not None:
msg += f"\nRaised by {run_url}"
gh_post_comment(org, project, args.pr_num, msg, dry_run=args.dry_run)
if __name__ == "__main__":
main()
| pytorch-master | .github/scripts/tryrebase.py |
#!/usr/bin/env python3
'''
Test ownership was introduced in https://github.com/pytorch/pytorch/issues/66232.
As a part of enforcing test ownership, we want to maintain a list of existing PyTorch labels
to verify the owners' existence. This script outputs a file containing a list of existing
pytorch/pytorch labels so that the file could be uploaded to S3.
This script assumes the correct env vars are set for AWS permissions.
'''
import boto3 # type: ignore[import]
import json
from functools import lru_cache
from typing import List, Any
from urllib.request import urlopen, Request
# Modified from https://github.com/pytorch/pytorch/blob/b00206d4737d1f1e7a442c9f8a1cadccd272a386/torch/hub.py#L129
def _read_url(url: Any) -> Any:
with urlopen(url) as r:
return r.headers, r.read().decode(r.headers.get_content_charset('utf-8'))
def request_for_labels(url: str) -> Any:
headers = {'Accept': 'application/vnd.github.v3+json'}
return _read_url(Request(url, headers=headers))
def get_last_page(header: Any) -> int:
# Link info looks like: <https://api.github.com/repositories/65600975/labels?per_page=100&page=2>;
# rel="next", <https://api.github.com/repositories/65600975/labels?per_page=100&page=3>; rel="last"
link_info = header['link']
prefix = "&page="
suffix = ">;"
return int(link_info[link_info.rindex(prefix) + len(prefix):link_info.rindex(suffix)])
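# Example based on the Link header format shown above: if the rel="last" URL ends with
# "...labels?per_page=100&page=3>;", get_last_page returns 3.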
def update_labels(labels: List[str], info: str) -> None:
labels_json = json.loads(info)
labels.extend([x["name"] for x in labels_json])
@lru_cache()
def get_pytorch_labels() -> List[str]:
prefix = "https://api.github.com/repos/pytorch/pytorch/labels?per_page=100"
header, info = request_for_labels(prefix + "&page=1")
labels: List[str] = []
update_labels(labels, info)
last_page = get_last_page(header)
assert last_page > 0, "Error reading header info to determine total number of pages of labels"
for page_number in range(2, last_page + 1): # skip page 1
_, info = request_for_labels(prefix + f"&page={page_number}")
update_labels(labels, info)
return labels
def send_labels_to_S3(labels: List[str]) -> None:
labels_file_name = "pytorch_labels.json"
obj = boto3.resource('s3').Object('ossci-metrics', labels_file_name)
obj.put(Body=json.dumps(labels).encode())
def main() -> None:
send_labels_to_S3(get_pytorch_labels())
if __name__ == '__main__':
main()
| pytorch-master | .github/scripts/export_pytorch_labels.py |
#!/usr/bin/env python3
import os
import re
def main() -> None:
ref = os.environ['GITHUB_REF']
m = re.match(r'^refs/(\w+)/(.*)$', ref)
if m:
category, stripped = m.groups()
if category == 'heads':
print(f'::set-output name=branch::{stripped}')
elif category == 'pull':
print(f'::set-output name=branch::pull/{stripped.split("/")[0]}')
elif category == 'tags':
print(f'::set-output name=tag::{stripped}')
if __name__ == '__main__':
main()
| pytorch-master | .github/scripts/parse_ref.py |
import json
import os
import subprocess
import requests
from typing import Any, Dict
from argparse import ArgumentParser
MERGEBOT_TOKEN = os.environ["MERGEBOT_TOKEN"]
PYTORCHBOT_TOKEN = os.environ["PYTORCHBOT_TOKEN"]
OWNER, REPO = "pytorch", "pytorch"
def git_api(
url: str, params: Dict[str, str], type: str = "get", token: str = MERGEBOT_TOKEN
) -> Any:
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"token {token}",
}
if type == "post":
return requests.post(
f"https://api.github.com{url}",
data=json.dumps(params),
headers=headers,
).json()
elif type == "patch":
return requests.patch(
f"https://api.github.com{url}",
data=json.dumps(params),
headers=headers,
).json()
else:
return requests.get(
f"https://api.github.com{url}",
params=params,
headers=headers,
).json()
def parse_args() -> Any:
parser = ArgumentParser("Rebase PR into branch")
parser.add_argument("--repo-name", type=str)
parser.add_argument("--branch", type=str)
return parser.parse_args()
def make_pr(repo_name: str, branch_name: str) -> Any:
params = {
"title": f"[{repo_name} hash update] update the pinned {repo_name} hash",
"head": branch_name,
"base": "master",
"body": "This PR is auto-generated nightly by [this action](https://github.com/pytorch/pytorch/blob/master/"
+ f".github/workflows/_update-commit-hash.yml).\nUpdate the pinned {repo_name} hash.",
}
response = git_api(f"/repos/{OWNER}/{REPO}/pulls", params, type="post")
print(f"made pr {response['html_url']}")
return response["number"]
def approve_pr(pr_number: str) -> None:
params = {"event": "APPROVE"}
# use pytorchbot to approve the pr
git_api(
f"/repos/{OWNER}/{REPO}/pulls/{pr_number}/reviews",
params,
type="post",
token=PYTORCHBOT_TOKEN,
)
def make_comment(pr_number: str, msg: str) -> None:
params = {"body": msg}
# comment with pytorchbot because pytorchmergebot gets ignored
git_api(
f"/repos/{OWNER}/{REPO}/issues/{pr_number}/comments",
params,
type="post",
token=PYTORCHBOT_TOKEN,
)
def close_pr(pr_number: str) -> None:
params = {"state": "closed"}
git_api(
f"/repos/{OWNER}/{REPO}/pulls/{pr_number}",
params,
type="patch",
)
def is_newer_hash(new_hash: str, old_hash: str, repo_name: str) -> bool:
def _get_date(hash: str) -> int:
# this git command prints the unix timestamp of the hash
return int(
subprocess.run(
f"git show --no-patch --no-notes --pretty=%ct {hash}".split(),
capture_output=True,
cwd=f"{repo_name}",
)
.stdout.decode("utf-8")
.strip()
)
return _get_date(new_hash) > _get_date(old_hash)
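# A minimal sketch of the comparison (timestamps are illustrative): if
# `git show --no-patch --no-notes --pretty=%ct <new_hash>` prints 1660000000 and the same command
# for <old_hash> prints 1650000000, is_newer_hash returns True.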
def main() -> None:
args = parse_args()
branch_name = os.environ["NEW_BRANCH_NAME"]
pr_num = None
# query to see if a pr already exists
params = {
"q": f"is:pr is:open in:title author:pytorchmergebot repo:{OWNER}/{REPO} {args.repo_name} hash update"
}
response = git_api("/search/issues", params)
if response["total_count"] != 0:
# pr does exist
pr_num = response["items"][0]["number"]
link = response["items"][0]["html_url"]
response = git_api(f"/repos/{OWNER}/{REPO}/pulls/{pr_num}", {})
branch_name = response["head"]["ref"]
print(
f"pr does exist, number is {pr_num}, branch name is {branch_name}, link is {link}"
)
hash = (
subprocess.run(
f"git rev-parse {args.branch}".split(),
capture_output=True,
cwd=f"{args.repo_name}",
)
.stdout.decode("utf-8")
.strip()
)
with open(f".github/ci_commit_pins/{args.repo_name}.txt", "r+") as f:
old_hash = f.read().strip()
f.seek(0)
f.truncate()
f.write(f"{hash}\n")
if is_newer_hash(hash, old_hash, args.repo_name):
# if there was an update, push to branch
subprocess.run(f"git checkout -b {branch_name}".split())
subprocess.run(f"git add .github/ci_commit_pins/{args.repo_name}.txt".split())
subprocess.run(
"git commit -m".split() + [f"update {args.repo_name} commit hash"]
)
subprocess.run(f"git push --set-upstream origin {branch_name} -f".split())
print(f"changes pushed to branch {branch_name}")
if pr_num is None:
# no existing pr, so make a new one and approve it
pr_num = make_pr(args.repo_name, branch_name)
approve_pr(pr_num)
# comment to merge if all checks are green
make_comment(pr_num, "@pytorchbot merge -g")
else:
print(
f"tried to update from old hash: {old_hash} to new hash: {hash} but the old hash seems to be newer, not creating pr"
)
if pr_num is not None:
make_comment(pr_num, "closing pr as the current hash seems up to date")
close_pr(pr_num)
print(f"closing PR {pr_num}")
if __name__ == "__main__":
main()
| pytorch-master | .github/scripts/update_commit_hashes.py |
#!/usr/bin/env python3
from gitutils import PeekableIterator, patterns_to_regex
from unittest import TestCase, main
class TestPeekableIterator(TestCase):
def test_iterator(self, input_: str = "abcdef") -> None:
iter_ = PeekableIterator(input_)
for idx, c in enumerate(iter_):
self.assertEqual(c, input_[idx])
def test_is_iterable(self) -> None:
from collections.abc import Iterator
iter_ = PeekableIterator("")
self.assertTrue(isinstance(iter_, Iterator))
def test_peek(self, input_: str = "abcdef") -> None:
iter_ = PeekableIterator(input_)
for idx, c in enumerate(iter_):
if idx + 1 < len(input_):
self.assertEqual(iter_.peek(), input_[idx + 1])
else:
self.assertTrue(iter_.peek() is None)
class TestPattern(TestCase):
def test_double_asterisks(self) -> None:
allowed_patterns = [
"aten/src/ATen/native/**LinearAlgebra*",
]
patterns_re = patterns_to_regex(allowed_patterns)
fnames = [
"aten/src/ATen/native/LinearAlgebra.cpp",
"aten/src/ATen/native/cpu/LinearAlgebraKernel.cpp"]
for filename in fnames:
self.assertTrue(patterns_re.match(filename))
if __name__ == '__main__':
main()
| pytorch-master | .github/scripts/test_gitutils.py |
import sys
from typing import Any, Dict, List, NamedTuple, Tuple
from gitutils import _check_output
import rockset # type: ignore[import]
import os
import re
def eprint(msg: str) -> None:
print(msg, file=sys.stderr)
class WorkflowCheck(NamedTuple):
workflowName: str
name: str
jobName: str
conclusion: str
def get_latest_commits() -> List[str]:
latest_viable_commit = _check_output(
[
"git",
"log",
"-n",
"1",
"--pretty=format:%H",
"origin/viable/strict",
],
encoding="ascii",
)
commits = _check_output(
[
"git",
"rev-list",
f"{latest_viable_commit}^..HEAD",
"--remotes=*origin/master",
],
encoding="ascii",
).splitlines()
return commits
def query_commits(commits: List[str], qlambda: Any) -> Any:
params = rockset.ParamDict()
params['shas'] = ",".join(commits)
results = qlambda.execute(parameters=params)
return results
def print_commit_status(commit: str, results: Dict[str, Any]) -> None:
print(commit)
for check in results['results']:
if check['sha'] == commit:
print(f"\t{check['conclusion']:>10}: {check['name']}")
def get_commit_results(commit: str, results: Dict[str, Any]) -> List[Dict[str, Any]]:
workflow_checks = []
for check in results['results']:
if check['sha'] == commit:
workflow_checks.append(WorkflowCheck(
workflowName=check['workflowName'],
name=check['name'],
jobName=check['jobName'],
conclusion=check['conclusion'],
)._asdict())
return workflow_checks
def isGreen(commit: str, results: Dict[str, Any]) -> Tuple[bool, str]:
workflow_checks = get_commit_results(commit, results)
regex = {
"pull": False,
"trunk": False,
"lint": False,
"linux-binary": False,
"windows-binary": False,
}
for check in workflow_checks:
workflowName = check['workflowName']
conclusion = check['conclusion']
for required_check in regex:
if re.match(required_check, workflowName, flags=re.IGNORECASE):
if conclusion not in ["success", "skipped"]:
return (False, workflowName + " checks were not successful")
else:
regex[required_check] = True
if workflowName in ["periodic", "docker-release-builds"] and conclusion not in ["success", "skipped"]:
return (False, workflowName + " checks were not successful")
missing_workflows = [x for x in regex.keys() if not regex[x]]
if len(missing_workflows) > 0:
return (False, "missing required workflows: " + ", ".join(missing_workflows))
return (True, "")
def get_latest_green_commit(commits: List[str], results: Dict[str, Any]) -> Any:
for commit in commits:
eprint(f"Checking {commit}")
is_green, msg = isGreen(commit, results)
if is_green:
eprint("GREEN")
return commit
else:
eprint("RED: " + msg)
return None
def main() -> None:
rs = rockset.Client(
api_server="api.rs2.usw2.rockset.com", api_key=os.environ["ROCKSET_API_KEY"]
)
qlambda = rs.QueryLambda.retrieve(
'commit_jobs_batch_query',
version='15aba20837ae9d75',
workspace='commons')
commits = get_latest_commits()
results = query_commits(commits, qlambda)
latest_viable_commit = get_latest_green_commit(commits, results)
print(latest_viable_commit)
if __name__ == "__main__":
main()
| pytorch-master | .github/scripts/fetch_latest_green_commit.py |
# Helper to get the id of the currently running job in a GitHub Actions
# workflow. GitHub does not provide this information to workflow runs, so we
# need to figure it out based on what they *do* provide.
import requests
import os
import argparse
# Our strategy is to retrieve the parent workflow run, then filter its jobs on
# RUNNER_NAME to figure out which job we're currently running.
#
# Why RUNNER_NAME? Because it's the only thing that uniquely identifies a job within a workflow.
# GITHUB_JOB doesn't work, as it corresponds to the job yaml id
# (https://bit.ly/37e78oI), which has two problems:
# 1. It's not present in the workflow job JSON object, so we can't use it as a filter.
# 2. It isn't unique; for matrix jobs the job yaml id is the same for all jobs in the matrix.
#
# RUNNER_NAME on the other hand is unique across the pool of runners. Also,
# since only one job can be scheduled on a runner at a time, we know that
# looking for RUNNER_NAME will uniquely identify the job we're currently
# running.
parser = argparse.ArgumentParser()
parser.add_argument(
"workflow_run_id", help="The id of the workflow run, should be GITHUB_RUN_ID"
)
parser.add_argument(
"runner_name",
help="The name of the runner to retrieve the job id, should be RUNNER_NAME",
)
args = parser.parse_args()
# From https://docs.github.com/en/actions/learn-github-actions/environment-variables
PYTORCH_REPO = os.environ.get("GITHUB_REPOSITORY", "pytorch/pytorch")
PYTORCH_GITHUB_API = f"https://api.github.com/repos/{PYTORCH_REPO}"
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
REQUEST_HEADERS = {
"Accept": "application/vnd.github.v3+json",
"Authorization": "token " + GITHUB_TOKEN,
}
response = requests.get(
f"{PYTORCH_GITHUB_API}/actions/runs/{args.workflow_run_id}/jobs?per_page=100",
headers=REQUEST_HEADERS,
)
jobs = response.json()["jobs"]
while "next" in response.links.keys():
response = requests.get(response.links["next"]["url"], headers=REQUEST_HEADERS)
jobs.extend(response.json()["jobs"])
# Sort the jobs list by start time, in descending order. We want to get the most
# recently scheduled job on the runner.
jobs.sort(key=lambda job: job["started_at"], reverse=True)
for job in jobs:
if job["runner_name"] == args.runner_name:
print(job["id"])
exit(0)
exit(1)
| pytorch-master | .github/scripts/get_workflow_job_id.py |
#!/usr/bin/env python3
'''
Verify that it is possible to round-trip native_functions.yaml via ruamel under some
configuration. Keeping native_functions.yaml consistent in this way allows us to
run codemods on the file using ruamel without introducing line noise. Note that we don't
want to normalize the YAML file, as that would lead to lots of spurious lint failures. Anything
that ruamel understands how to roundtrip, e.g., whitespace and comments, is OK!
ruamel is a bit picky about inconsistent indentation, so you will have to indent your
file properly. Also, if you are working on changing the syntax of native_functions.yaml,
you may find that you want to use some format that is not what ruamel prefers. If so,
it is OK to modify this script (instead of reformatting native_functions.yaml)--the point
is simply to make sure that there is *some* configuration of ruamel that can round trip
the YAML, not to be prescriptive about it.
'''
import ruamel.yaml # type: ignore[import]
import difflib
import sys
from pathlib import Path
from io import StringIO
def fn(base: str) -> str:
return str(base / Path("aten/src/ATen/native/native_functions.yaml"))
with open(Path(__file__).parent.parent.parent / fn('.'), "r") as f:
contents = f.read()
yaml = ruamel.yaml.YAML() # type: ignore[attr-defined]
yaml.preserve_quotes = True # type: ignore[assignment]
yaml.width = 1000 # type: ignore[assignment]
yaml.boolean_representation = ['False', 'True'] # type: ignore[attr-defined]
r = yaml.load(contents)
# Because ruamel's author intentionally didn't include conversion to string
# https://stackoverflow.com/questions/47614862/best-way-to-use-ruamel-yaml-to-dump-to-string-not-to-stream
string_stream = StringIO()
yaml.dump(r, string_stream)
new_contents = string_stream.getvalue()
string_stream.close()
if contents != new_contents:
print("""\
## LINT FAILURE: native_functions.yaml ##
native_functions.yaml failed lint; please apply the diff below to fix lint.
If you think this is in error, please see .github/scripts/lint_native_functions.py
""", file=sys.stderr)
sys.stdout.writelines(difflib.unified_diff(contents.splitlines(True), new_contents.splitlines(True), fn('a'), fn('b')))
sys.exit(1)
| pytorch-master | .github/scripts/lint_native_functions.py |
| pytorch-master | aten/src/ATen/function_wrapper.py |
#!/usr/bin/env python3
import argparse
import glob
import sys
import os
from torchgen.code_template import CodeTemplate
H_NAME = "glsl.h"
CPP_NAME = "glsl.cpp"
DEFAULT_ENV = {"precision": "highp", "format": "rgba32f"}
def findAllGlsls(path):
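    # Recursively collect every *.glsl file under `path`; sort for a stable,
    # deterministic ordering of the generated sources.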
vexs = glob.glob(os.path.join(path, '**', '*.glsl'), recursive=True)
output = []
for f in vexs:
if len(f) > 1:
output.append(f)
output.sort()
return output
def getName(filePath):
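    # e.g. "foo/bar/conv2d.glsl" -> "conv2d_glsl" (the file name here is made up)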
return os.path.basename(filePath).replace("/", "_").replace(".", "_")
def genCppH(hFilePath, cppFilePath, templateGlslPaths, tmpDirPath, env):
print("hFilePath:{}".format(hFilePath))
print("cppFilePath:{}".format(cppFilePath))
h = "#pragma once\n"
nsbegin = "\nnamespace at { namespace native { namespace vulkan { \n"
nsend = "\n} } } //namespace at::native::vulkan\n"
h += nsbegin
cpp = "#include <ATen/native/vulkan/{}>".format(H_NAME)
cpp += nsbegin
for templateGlslPath in templateGlslPaths:
name = getName(templateGlslPath)
h += "extern const char* " + name + ";\n"
cpp += "const char* " + name + " = \n"
codeTemplate = CodeTemplate.from_file(templateGlslPath)
srcPath = tmpDirPath + "/" + name + ".glsl"
content = codeTemplate.substitute(env)
lines = content.split("\n")
for l in lines:
            if len(l) < 1:
continue
cpp += "\"" + l + "\\n\"\n"
cpp += ";\n"
cpp += nsend
h += nsend
with open(hFilePath, "w") as f:
f.write(h)
with open(cppFilePath, "w") as f:
f.write(cpp)
def parse_arg_env(items):
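    # Turns repeated KEY=VALUE strings from --env into a dict, e.g.
    # ["precision=mediump", "format=rgba16f"] -> {"precision": "mediump", "format": "rgba16f"}
    # (the values shown are only illustrative).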
d = {}
if items:
for item in items:
tokens = item.split("=")
key = tokens[0].strip()
value = tokens[1].strip()
d[key] = value
return d
def main(argv):
parser = argparse.ArgumentParser(description='Generate glsl.cpp and glsl.h containing glsl sources')
parser.add_argument(
'-i',
'--glsl-path',
help='path to directory with glsl to process',
required=True,
default='.')
parser.add_argument(
'-o',
'--output-path',
help='path to directory to generate glsl.h glsl.cpp (cpp namespace at::native::vulkan)',
required=True)
parser.add_argument(
'-t',
'--tmp-dir-path',
required=True,
help='/tmp')
parser.add_argument(
"--env",
metavar="KEY=VALUE",
nargs='*',
help="Set a number of key-value pairs")
options = parser.parse_args()
if not os.path.exists(options.tmp_dir_path):
os.makedirs(options.tmp_dir_path)
env = DEFAULT_ENV
for key, value in parse_arg_env(options.env).items():
env[key] = value
if not os.path.exists(options.output_path):
os.makedirs(options.output_path)
glsls = findAllGlsls(options.glsl_path)
genCppH(
options.output_path + "/" + H_NAME, options.output_path + "/" + CPP_NAME,
glsls,
tmpDirPath=options.tmp_dir_path,
env=env)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| pytorch-master | aten/src/ATen/gen_vulkan_glsl.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import confu
from confu import arm, x86
parser = confu.standard_parser()
def main(args):
options = parser.parse_args(args)
build = confu.Build.from_options(options)
build.export_cpath("include", ["q8gemm.h"])
with build.options(
source_dir="src",
deps=[
build.deps.cpuinfo,
build.deps.clog,
build.deps.psimd,
build.deps.fxdiv,
build.deps.pthreadpool,
build.deps.FP16,
],
extra_include_dirs="src",
):
requantization_objects = [
build.cc("requantization/precise-scalar.c"),
build.cc("requantization/fp32-scalar.c"),
build.cc("requantization/q31-scalar.c"),
build.cc("requantization/gemmlowp-scalar.c"),
]
with build.options(isa=arm.neon if build.target.is_arm else None):
requantization_objects += [
build.cc("requantization/precise-psimd.c"),
build.cc("requantization/fp32-psimd.c"),
]
if build.target.is_x86 or build.target.is_x86_64:
with build.options(isa=x86.sse2):
requantization_objects += [
build.cc("requantization/precise-sse2.c"),
build.cc("requantization/fp32-sse2.c"),
build.cc("requantization/q31-sse2.c"),
build.cc("requantization/gemmlowp-sse2.c"),
]
with build.options(isa=x86.ssse3):
requantization_objects += [
build.cc("requantization/precise-ssse3.c"),
build.cc("requantization/q31-ssse3.c"),
build.cc("requantization/gemmlowp-ssse3.c"),
]
with build.options(isa=x86.sse4_1):
requantization_objects += [
build.cc("requantization/precise-sse4.c"),
build.cc("requantization/q31-sse4.c"),
build.cc("requantization/gemmlowp-sse4.c"),
]
if build.target.is_arm or build.target.is_arm64:
with build.options(isa=arm.neon if build.target.is_arm else None):
requantization_objects += [
build.cc("requantization/precise-neon.c"),
build.cc("requantization/fp32-neon.c"),
build.cc("requantization/q31-neon.c"),
build.cc("requantization/gemmlowp-neon.c"),
]
qnnpytorch_pack_objects = [
# Common parts
build.cc("init.c"),
build.cc("operator-delete.c"),
build.cc("operator-run.c"),
# Operators
build.cc("add.c"),
build.cc("average-pooling.c"),
build.cc("channel-shuffle.c"),
build.cc("clamp.c"),
build.cc("convolution.c"),
build.cc("indirection.c"),
build.cc("deconvolution.c"),
build.cc("fully-connected.c"),
build.cc("global-average-pooling.c"),
build.cc("hardsigmoid.c"),
build.cc("hardswish.c"),
build.cc("leaky-relu.c"),
build.cc("max-pooling.c"),
build.cc("sigmoid.c"),
build.cc("softargmax.c"),
build.cc("tanh.c"),
# Scalar micro-kernels
build.cc("u8lut32norm/scalar.c"),
build.cc("x8lut/scalar.c"),
]
with build.options(isa=arm.neon if build.target.is_arm else None):
qnnpytorch_pack_objects += [
build.cc("sconv/6x8-psimd.c"),
build.cc("sdwconv/up4x9-psimd.c"),
build.cc("sgemm/6x8-psimd.c"),
]
with build.options(isa=arm.neon if build.target.is_arm else None):
if build.target.is_arm or build.target.is_arm64:
qnnpytorch_pack_objects += [
build.cc("q8avgpool/mp8x9p8q-neon.c"),
build.cc("q8avgpool/up8x9-neon.c"),
build.cc("q8avgpool/up8xm-neon.c"),
build.cc("q8conv/4x8-neon.c"),
build.cc("q8conv/8x8-neon.c"),
build.cc("q8dwconv/mp8x25-neon.c"),
build.cc("q8dwconv/mp8x27-neon.c"),
build.cc("q8dwconv/up8x9-neon.c"),
build.cc("q8gavgpool/mp8x7p7q-neon.c"),
build.cc("q8gavgpool/up8x7-neon.c"),
build.cc("q8gavgpool/up8xm-neon.c"),
build.cc("q8gemm/4x-sumrows-neon.c"),
build.cc("q8gemm/4x8-neon.c"),
build.cc("q8gemm/4x8c2-xzp-neon.c"),
build.cc("q8gemm/6x4-neon.c"),
build.cc("q8gemm/8x8-neon.c"),
build.cc("q8vadd/neon.c"),
build.cc("sgemm/5x8-neon.c"),
build.cc("sgemm/6x8-neon.c"),
build.cc("u8clamp/neon.c"),
build.cc("u8maxpool/16x9p8q-neon.c"),
build.cc("u8maxpool/sub16-neon.c"),
build.cc("u8rmax/neon.c"),
build.cc("x8zip/x2-neon.c"),
build.cc("x8zip/x3-neon.c"),
build.cc("x8zip/x4-neon.c"),
build.cc("x8zip/xm-neon.c"),
]
if build.target.is_arm:
qnnpytorch_pack_objects += [
build.cc("hgemm/8x8-aarch32-neonfp16arith.S"),
build.cc("q8conv/4x8-aarch32-neon.S"),
build.cc("q8dwconv/up8x9-aarch32-neon.S"),
build.cc("q8gemm/4x8-aarch32-neon.S"),
build.cc("q8gemm/4x8c2-xzp-aarch32-neon.S"),
]
if build.target.is_arm64:
qnnpytorch_pack_objects += [
build.cc("q8gemm/8x8-aarch64-neon.S"),
build.cc("q8conv/8x8-aarch64-neon.S"),
]
if build.target.is_x86 or build.target.is_x86_64:
with build.options(isa=x86.sse2):
qnnpytorch_pack_objects += [
build.cc("q8avgpool/mp8x9p8q-sse2.c"),
build.cc("q8avgpool/up8x9-sse2.c"),
build.cc("q8avgpool/up8xm-sse2.c"),
build.cc("q8conv/4x4c2-sse2.c"),
build.cc("q8dwconv/mp8x25-sse2.c"),
build.cc("q8dwconv/mp8x27-sse2.c"),
build.cc("q8dwconv/up8x9-sse2.c"),
build.cc("q8gavgpool/mp8x7p7q-sse2.c"),
build.cc("q8gavgpool/up8x7-sse2.c"),
build.cc("q8gavgpool/up8xm-sse2.c"),
build.cc("q8gemm/2x4c8-sse2.c"),
build.cc("q8gemm/4x4c2-sse2.c"),
build.cc("q8vadd/sse2.c"),
build.cc("u8clamp/sse2.c"),
build.cc("u8maxpool/16x9p8q-sse2.c"),
build.cc("u8maxpool/sub16-sse2.c"),
build.cc("u8rmax/sse2.c"),
build.cc("x8zip/x2-sse2.c"),
build.cc("x8zip/x3-sse2.c"),
build.cc("x8zip/x4-sse2.c"),
build.cc("x8zip/xm-sse2.c"),
]
build.static_library("qnnpack", qnnpytorch_pack_objects)
with build.options(
source_dir="test",
deps={
(
build,
build.deps.cpuinfo,
build.deps.clog,
build.deps.pthreadpool,
build.deps.FP16,
build.deps.googletest,
): any,
"log": build.target.is_android,
},
extra_include_dirs=["src", "test"],
):
build.unittest("hgemm-test", build.cxx("hgemm.cc"))
build.unittest("q8avgpool-test", build.cxx("q8avgpool.cc"))
build.unittest("q8conv-test", build.cxx("q8conv.cc"))
build.unittest("q8dwconv-test", build.cxx("q8dwconv.cc"))
build.unittest("q8gavgpool-test", build.cxx("q8gavgpool.cc"))
build.unittest("q8gemm-test", build.cxx("q8gemm.cc"))
build.unittest("q8vadd-test", build.cxx("q8vadd.cc"))
build.unittest("sconv-test", build.cxx("sconv.cc"))
build.unittest("sgemm-test", build.cxx("sgemm.cc"))
build.unittest("u8clamp-test", build.cxx("u8clamp.cc"))
build.unittest("u8lut32norm-test", build.cxx("u8lut32norm.cc"))
build.unittest("u8maxpool-test", build.cxx("u8maxpool.cc"))
build.unittest("u8rmax-test", build.cxx("u8rmax.cc"))
build.unittest("x8lut-test", build.cxx("x8lut.cc"))
build.unittest("x8zip-test", build.cxx("x8zip.cc"))
build.unittest("add-test", build.cxx("add.cc"))
build.unittest("average-pooling-test", build.cxx("average-pooling.cc"))
build.unittest("channel-shuffle-test", build.cxx("channel-shuffle.cc"))
build.unittest("clamp-test", build.cxx("clamp.cc"))
build.unittest("convolution-test", build.cxx("convolution.cc"))
build.unittest("deconvolution-test", build.cxx("deconvolution.cc"))
build.unittest("fully-connected-test", build.cxx("fully-connected.cc"))
build.unittest(
"global-average-pooling-test", build.cxx("global-average-pooling.cc")
)
build.unittest("leaky-relu-test", build.cxx("leaky-relu.cc"))
build.unittest("max-pooling-test", build.cxx("max-pooling.cc"))
build.unittest("sigmoid-test", build.cxx("sigmoid.cc"))
build.unittest("softargmax-test", build.cxx("softargmax.cc"))
build.unittest("tanh-test", build.cxx("tanh.cc"))
build.unittest("hardsigmoid-test", build.cxx("hardsigmoid.cc"))
build.unittest("hardswish-test", build.cxx("hardswish.cc"))
build.unittest(
"requantization-test",
[build.cxx("requantization.cc")] + requantization_objects,
)
benchmark_isa = None
if build.target.is_arm:
benchmark_isa = arm.neon
elif build.target.is_x86:
benchmark_isa = x86.sse4_1
with build.options(
source_dir="bench",
deps={
(
build,
build.deps.cpuinfo,
build.deps.clog,
build.deps.pthreadpool,
build.deps.FP16,
build.deps.googlebenchmark,
): any,
"log": build.target.is_android,
},
isa=benchmark_isa,
extra_include_dirs="src",
):
build.benchmark("add-bench", build.cxx("add.cc"))
build.benchmark("average-pooling-bench", build.cxx("average-pooling.cc"))
build.benchmark("channel-shuffle-bench", build.cxx("channel-shuffle.cc"))
build.benchmark("convolution-bench", build.cxx("convolution.cc"))
build.benchmark(
"global-average-pooling-bench", build.cxx("global-average-pooling.cc")
)
build.benchmark("max-pooling-bench", build.cxx("max-pooling.cc"))
build.benchmark("sigmoid-bench", build.cxx("sigmoid.cc"))
build.benchmark("softargmax-bench", build.cxx("softargmax.cc"))
build.benchmark("tanh-bench", build.cxx("tanh.cc"))
build.benchmark("hardsigmoid-bench", build.cxx("hardsigmoid.cc"))
build.benchmark("hardswish-bench", build.cxx("hardswish.cc"))
build.benchmark("q8gemm-bench", build.cxx("q8gemm.cc"))
build.benchmark("hgemm-bench", build.cxx("hgemm.cc"))
build.benchmark("sgemm-bench", build.cxx("sgemm.cc"))
build.benchmark(
"requantization-bench",
[build.cxx("requantization.cc")] + requantization_objects,
)
return build
if __name__ == "__main__":
import sys
main(sys.argv[1:]).generate()
| pytorch-master | aten/src/ATen/native/quantized/cpu/qnnpack/configure.py |
import os
QNNPACK_SOURCES = {
# Generic functions
None: [
"requantization/fp32-psimd.c",
"requantization/fp32-scalar.c",
"requantization/gemmlowp-scalar.c",
"requantization/precise-psimd.c",
"requantization/precise-scalar.c",
"requantization/q31-scalar.c",
"sgemm/6x8-psimd.c",
"u8lut32norm/scalar.c",
"x8lut/scalar.c",
],
# AArch32/AArch64-specific uKernels
"defined(__arm__) || defined(__aarch64__)": [
"q8avgpool/mp8x9p8q-neon.c",
"q8avgpool/up8x9-neon.c",
"q8avgpool/up8xm-neon.c",
"q8conv/4x8-neon.c",
"q8conv/8x8-neon.c",
"q8dwconv/mp8x25-neon.c",
"q8dwconv/mp8x25-neon-per-channel.c",
"q8dwconv/mp8x27-neon.c",
"q8dwconv/up8x9-neon.c",
"q8dwconv/up8x9-neon-per-channel.c",
"q8gavgpool/mp8x7p7q-neon.c",
"q8gavgpool/up8x7-neon.c",
"q8gavgpool/up8xm-neon.c",
"q8gemm/4x-sumrows-neon.c",
"q8gemm/4x8-neon.c",
"q8gemm/4x8-dq-neon.c",
"q8gemm/4x8c2-xzp-neon.c",
"q8gemm/6x4-neon.c",
"q8gemm/8x8-neon.c",
"q8vadd/neon.c",
"requantization/fp32-neon.c",
"requantization/gemmlowp-neon.c",
"requantization/precise-neon.c",
"requantization/q31-neon.c",
"sgemm/5x8-neon.c",
"sgemm/6x8-neon.c",
"u8clamp/neon.c",
"u8maxpool/16x9p8q-neon.c",
"u8maxpool/sub16-neon.c",
"u8rmax/neon.c",
"x8zip/x2-neon.c",
"x8zip/x3-neon.c",
"x8zip/x4-neon.c",
"x8zip/xm-neon.c",
],
# x86/x86-64-specific uKernels
"defined(__i386__) || defined(__i686__) || defined(__x86_64__)": [
"q8avgpool/mp8x9p8q-sse2.c",
"q8avgpool/up8x9-sse2.c",
"q8avgpool/up8xm-sse2.c",
"q8conv/4x4c2-sse2.c",
"q8dwconv/mp8x25-sse2.c",
"q8dwconv/mp8x25-sse2-per-channel.c",
"q8dwconv/mp8x27-sse2.c",
"q8dwconv/up8x9-sse2.c",
"q8dwconv/up8x9-sse2-per-channel.c",
"q8gavgpool/mp8x7p7q-sse2.c",
"q8gavgpool/up8x7-sse2.c",
"q8gavgpool/up8xm-sse2.c",
"q8gemm/2x4c8-sse2.c",
"q8gemm/4x4c2-dq-sse2.c",
"q8gemm/4x4c2-sse2.c",
"q8vadd/sse2.c",
"requantization/fp32-sse2.c",
"requantization/gemmlowp-sse2.c",
"requantization/gemmlowp-sse4.c",
"requantization/gemmlowp-ssse3.c",
"requantization/precise-sse2.c",
"requantization/precise-sse4.c",
"requantization/precise-ssse3.c",
"requantization/q31-sse2.c",
"requantization/q31-sse4.c",
"requantization/q31-ssse3.c",
"u8clamp/sse2.c",
"u8maxpool/16x9p8q-sse2.c",
"u8maxpool/sub16-sse2.c",
"u8rmax/sse2.c",
"x8zip/x2-sse2.c",
"x8zip/x3-sse2.c",
"x8zip/x4-sse2.c",
"x8zip/xm-sse2.c",
],
# AArch32-specific uKernels
"defined(__arm__)": [
"hgemm/8x8-aarch32-neonfp16arith.S",
"q8conv/4x8-aarch32-neon.S",
"q8dwconv/up8x9-aarch32-neon.S",
"q8dwconv/up8x9-aarch32-neon-per-channel.S",
"q8gemm/4x8-aarch32-neon.S",
"q8gemm/4x8-dq-aarch32-neon.S",
"q8gemm/4x8c2-xzp-aarch32-neon.S",
],
# AArch64-specific uKernels
"defined(__aarch64__)": [
"q8conv/8x8-aarch64-neon.S",
"q8gemm/8x8-aarch64-neon.S",
"q8gemm/8x8-dq-aarch64-neon.S",
],
}
BANNER = "/* Auto-generated by generate-wrappers.py script. Do not modify */"
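# Sketch of what one generated wrapper ends up containing (the file name and
# condition depend on the entry above, e.g. wrappers/q8gemm/4x8-neon.c):
#
#   /* Auto-generated by generate-wrappers.py script. Do not modify */
#
#   #if defined(__arm__) || defined(__aarch64__)
#   #include <q8gemm/4x8-neon.c>
#   #endif /* defined(__arm__) || defined(__aarch64__) */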
if __name__ == "__main__":
for condition, filenames in QNNPACK_SOURCES.items():
for filename in filenames:
filepath = os.path.join("wrappers", filename)
if not os.path.isdir(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
with open(filepath, "w") as wrapper:
print(BANNER, file=wrapper)
print(file=wrapper)
# Architecture- or platform-dependent preprocessor flags can be
# defined here. Note: platform_preprocessor_flags can't be used
# because they are ignored by arc focus & buck project.
if condition is None:
print("#include <%s>" % filename, file=wrapper)
else:
# Include source file only if condition is satisfied
print("#if %s" % condition, file=wrapper)
print("#include <%s>" % filename, file=wrapper)
print("#endif /* %s */" % condition, file=wrapper)
| pytorch-master | aten/src/ATen/native/quantized/cpu/qnnpack/generate-wrapper.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import confu
parser = confu.standard_parser("clog configuration script")
def main(args):
options = parser.parse_args(args)
build = confu.Build.from_options(options)
build.export_cpath("include", ["clog.h"])
with build.options(source_dir="src", extra_include_dirs="src"):
build.static_library("clog", build.cc("clog.c"))
with build.options(source_dir="test", deps={
(build, build.deps.googletest): all,
"log": build.target.is_android}):
build.unittest("clog-test", build.cxx("clog.cc"))
return build
if __name__ == "__main__":
import sys
main(sys.argv[1:]).generate()
| pytorch-master | aten/src/ATen/native/quantized/cpu/qnnpack/deps/clog/configure.py |
#!/usr/bin/env python3
"""
Code generator for NNAPI wrapper. We can't link directly against
libneuralnetworks.so because we want PyTorch to work on Android
devices that don't have it available. Instead, we generate a wrapper
that opens libneuralnetworks.so with dlopen and finds the functions
we need with dlsym. We also generate a "check" wrapper that checks
return values and throws C++ exceptions on errors.
"""
import sys
import re
import pathlib
import textwrap
PREFIX = """\
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// This file is generated by nnapi/codegen.py
"""
NNAPI_FUNCTIONS = [
("int", "ANeuralNetworks_getDeviceCount", "uint32_t* numDevices"), # noqa: B950
("int", "ANeuralNetworks_getDevice", "uint32_t devIndex, ANeuralNetworksDevice** device"), # noqa: B950
("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"), # noqa: B950
("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"), # noqa: B950
("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"), # noqa: B950
("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"), # noqa: B950
("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"), # noqa: B950
("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"), # noqa: B950
("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"), # noqa: B950
("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"), # noqa: B950
("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"), # noqa: B950
("void", "ANeuralNetworksModel_free", "ANeuralNetworksModel* model"), # noqa: B950
("int", "ANeuralNetworksModel_finish", "ANeuralNetworksModel* model"), # noqa: B950
("int", "ANeuralNetworksModel_addOperand", "ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type"), # noqa: B950
("int", "ANeuralNetworksModel_setOperandValue", "ANeuralNetworksModel* model, int32_t index, const void* buffer, size_t length"), # noqa: B950
("int", "ANeuralNetworksModel_setOperandValueFromMemory", "ANeuralNetworksModel* model, int32_t index, const ANeuralNetworksMemory* memory, size_t offset, size_t length"), # noqa: B950
("int", "ANeuralNetworksModel_addOperation", "ANeuralNetworksModel* model, ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs"), # noqa: B950
("int", "ANeuralNetworksModel_identifyInputsAndOutputs", "ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs"), # noqa: B950
("int", "ANeuralNetworksModel_relaxComputationFloat32toFloat16", "ANeuralNetworksModel* model, bool allow"), # noqa: B950
("int", "ANeuralNetworksCompilation_create", "ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation"), # noqa: B950
("void", "ANeuralNetworksCompilation_free", "ANeuralNetworksCompilation* compilation"), # noqa: B950
("int", "ANeuralNetworksCompilation_setPreference", "ANeuralNetworksCompilation* compilation, int32_t preference"), # noqa: B950
("int", "ANeuralNetworksCompilation_finish", "ANeuralNetworksCompilation* compilation"), # noqa: B950
("int", "ANeuralNetworksExecution_create", "ANeuralNetworksCompilation* compilation, ANeuralNetworksExecution** execution"), # noqa: B950
("void", "ANeuralNetworksExecution_free", "ANeuralNetworksExecution* execution"), # noqa: B950
("int", "ANeuralNetworksExecution_setInput", "ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const void* buffer, size_t length"), # noqa: B950
("int", "ANeuralNetworksExecution_setInputFromMemory", "ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, size_t offset, size_t length"), # noqa: B950
("int", "ANeuralNetworksExecution_setOutput", "ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, void* buffer, size_t length"), # noqa: B950
("int", "ANeuralNetworksExecution_setOutputFromMemory", "ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, size_t offset, size_t length"), # noqa: B950
("int", "ANeuralNetworksExecution_startCompute", "ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event"), # noqa: B950
("int", "ANeuralNetworksEvent_wait", "ANeuralNetworksEvent* event"), # noqa: B950
("void", "ANeuralNetworksEvent_free", "ANeuralNetworksEvent* event"), # noqa: B950
("int", "ANeuralNetworksExecution_getOutputOperandRank", "ANeuralNetworksExecution* execution, int32_t index, uint32_t* rank"), # noqa: B950
("int", "ANeuralNetworksExecution_getOutputOperandDimensions", "ANeuralNetworksExecution* execution, int32_t index, uint32_t* dimensions"), # noqa: B950
]
def main(argv):
struct_members = []
load_functions = []
define_checks = []
for ret, name, args in NNAPI_FUNCTIONS:
short_name = name.replace("ANeuralNetworks", "", 1)
struct_members.append(f" {ret}(*{short_name})({args});")
load_functions.append(f' *(void**)&nnapi_.{short_name} = dlsym(handle, "{name}");')
load_functions.append(f' check_nnapi_.{short_name} = check_{short_name};')
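        # Extract just the parameter names (each identifier immediately before a
        # ',' or the end of the declaration) to build the forwarding call.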
call_args = "".join(re.findall(r"\w+(?:,|$)", args))
if ret == "void":
define_checks.append(textwrap.dedent(f"""\
{ret} check_{short_name}({args}) {{
CAFFE_ENFORCE(nnapi_.{short_name});
nnapi_.{short_name}({call_args});
}}"""))
if ret == "int":
define_checks.append(textwrap.dedent(f"""\
{ret} check_{short_name}({args}) {{
CAFFE_ENFORCE(nnapi_.{short_name});
int ret = nnapi_.{short_name}({call_args});
// TODO: Maybe add better logging here.
CAFFE_ENFORCE(
ret == ANEURALNETWORKS_NO_ERROR,
"{short_name}", "failed with error ", ret
);
return ret;
}}"""))
out_dir = pathlib.Path(__file__).parent
(out_dir / "nnapi_wrapper.h").write_text(
PREFIX +
textwrap.dedent("""\
#ifndef NNAPI_WRAPPER_H_
#define NNAPI_WRAPPER_H_
#include <stddef.h>
#include <stdint.h>
#include <ATen/nnapi/NeuralNetworks.h>
struct nnapi_wrapper {
__STRUCT_MEMBERS__
};
#ifdef __cplusplus
void nnapi_wrapper_load(struct nnapi_wrapper** nnapi, struct nnapi_wrapper** check_nnapi);
#endif
#endif
""")
.replace("__STRUCT_MEMBERS__", "\n".join(struct_members))
)
(out_dir / "nnapi_wrapper.cpp").write_text(
PREFIX +
textwrap.dedent("""\
#ifndef _WIN32
#include <dlfcn.h>
#endif
#include <ATen/nnapi/nnapi_wrapper.h>
#include <c10/util/Logging.h>
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
static int loaded = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
static struct nnapi_wrapper nnapi_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
static struct nnapi_wrapper check_nnapi_;
__DEFINE_CHECK_FUNCTIONS__
void nnapi_wrapper_load(struct nnapi_wrapper** nnapi, struct nnapi_wrapper** check_nnapi) {
#ifdef _WIN32
TORCH_CHECK(false, "Running NNAPI models is not supported on Windows.");
#else
if (!loaded) {
// Clear error flag.
dlerror();
void* handle = dlopen("libneuralnetworks.so", RTLD_LAZY | RTLD_LOCAL);
CAFFE_ENFORCE(handle, "Failed to load libneuralnetworks.so ", dlerror());
__LOAD_FUNCTIONS__
loaded = 1;
}
*nnapi = &nnapi_;
*check_nnapi = &check_nnapi_;
#endif
}
""")
.replace("__DEFINE_CHECK_FUNCTIONS__", "\n".join(define_checks))
.replace("__LOAD_FUNCTIONS__", "\n".join(load_functions))
)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| pytorch-master | aten/src/ATen/nnapi/codegen.py |
from collections import defaultdict
from typing import Dict, List, Optional, Sequence, Tuple, Union
import torchgen.api.dispatcher as dispatcher
from torchgen.api.translate import translate
from torchgen.api.types import Binding, DispatcherSignature, Expr
from torchgen.context import with_native_function
from torchgen.model import (
Annotation,
Argument,
BackendIndex,
BackendMetadata,
BaseOperatorName,
BaseTy,
BaseType,
DEFAULT_KERNEL_NAMESPACE,
DeviceCheckType,
DispatchKey,
FunctionSchema,
NativeFunction,
NativeFunctionsGroup,
OperatorName,
Return,
SchemaKind,
Variant,
)
from torchgen.utils import concatMap
# See Note: [Out ops with functional variants that don't get grouped properly]
OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
# This has a functional variant, but it's currently marked private.
# This function should be marked private as well (*_backward ops aren't exposed to python anyway).
"adaptive_avg_pool3d_backward.grad_input",
# There's a functional variant, _slow_conv2d_backward.output_mask, that isn't grouped properly.
# Maybe we can kill this operator in favor of convolution_backward?
"_slow_conv2d_backward.grad_input",
]
# See Note: [Mutable ops that cannot get an out variant]
MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT = [
# should be out=?
"_cummax_helper",
# should be out=?
"_cummin_helper",
]
# All of these operators don't have any tensor like returns
FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT = [
"_assert_async", # no return
"_dimI", # returns an int
"_dimV", # returns an int
"_has_same_storage_numel", # returns a boolean
"_linalg_check_errors", # no return
"_local_scalar_dense", # returns a Scalar
"_nested_tensor_from_mask_left_aligned", # returns a boolean
"_nnz", # returns an int
"_use_cudnn_ctc_loss", # returns a boolean
"_validate_compressed_sparse_indices", # no return
"allclose", # returns a boolean
"dense_dim", # returns an int
"equal", # returns a boolean
"is_coalesced", # returns an boolean
"is_pinned", # returns a boolean
"is_same_size", # returns a boolean
"is_set_to", # returns a boolean
"q_per_channel_axis", # returns an int
"q_scale", # returns a float
"q_zero_point", # returns an int
"qscheme", # returns a QScheme
"record_stream", # no return
"sparse_dim", # returns an int
]
INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
# polygamma and polygamma.out both exist, but have a
# pre-self arg (while polygamma_ does not)
# We should either fix this schema so it can be grouped properly,
# or allow the codegen to generate new functional/out= NativeFunctions for this op
# (which would require changing its overload name to prevent overload ambiguity).
"polygamma_"
]
# Groups "similar" NativeFunctions together
# example add.Tensor, add_.Tensor, add.out
# "similar" NativeFunctions are all expected to have an identical `signature()`,
# But have differing SchemaKinds.
def pre_group_native_functions(
native_functions: Sequence[NativeFunction],
) -> Dict[FunctionSchema, Dict[SchemaKind, NativeFunction]]:
pre_grouped_native_functions: Dict[
FunctionSchema, Dict[SchemaKind, NativeFunction]
] = defaultdict(dict)
for f in native_functions:
d = pre_grouped_native_functions[f.func.signature()]
assert f.func.kind() not in d
d[f.func.kind()] = f
return pre_grouped_native_functions
# Returns the out variant overload name given a base function overload name
def get_expected_out_variant_overload_name(overload_name: Optional[str]) -> str:
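    # e.g. None -> "out", "Scalar" -> "Scalar_out"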
return "out" if not overload_name else f"{overload_name}_out"
# Helper function: given an inplace FunctionSchema, generate its corresponding out= variant
# Example before:
# _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
# Example after:
# _add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out)
def self_to_out_signature(func: FunctionSchema) -> FunctionSchema:
# Generating an out= schema from an inplace schema.
assert func.kind() == SchemaKind.inplace
assert func.arguments.self_arg is not None
# The new out= schema has:
# - a new out argument with the same type as "func" (but with a mutable annotation)
# - The returns (if any) now alias the out= argument instead of "func"
# - an "out" overload name
return FunctionSchema(
name=func.name.remove_inplace().with_overload(
get_expected_out_variant_overload_name(func.name.overload_name)
),
arguments=func.arguments.remove_self_annotation().with_out_args(
[
Argument(
name="out",
type=func.arguments.self_arg.argument.type,
default=None,
annotation=func.arguments.self_arg.argument.annotation,
)
]
),
returns=func.returns,
)
# Helper function: given a functional FunctionSchema, generate its corresponding out= variant
# Example before:
# _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None,
# bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
# Example after:
# _to_copy._out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None,
# Tensor(a!) out) -> Tensor(a!)
def functional_to_out_signature(func: FunctionSchema) -> FunctionSchema:
# Generating an out= schema from a functional schema.
assert func.kind() == SchemaKind.functional
new_returns, new_out_args = generate_out_args_from_schema(func)
# The new out= schema has:
# - one or more new out argument(s) with the same type as returns (but with a mutable annotation)
# - The returns now alias the out= arguments
# - an "_out" overload name
return FunctionSchema(
name=func.name.with_overload(
get_expected_out_variant_overload_name(func.name.overload_name)
),
arguments=func.arguments.signature().with_out_args(
new_out_args,
),
returns=tuple(new_returns),
)
# Helper function: given a function schema, generate corresponding out arguments, also the updated return annotations.
def generate_out_args_from_schema(
func: FunctionSchema,
) -> Tuple[List[Return], List[Argument]]:
# More of a sanity check - our existing restrictions on schemas should enforce that
# mutable schema kinds never return their mutable arguments.
assert not any(
r.annotation is not None and r.annotation.is_write for r in func.returns
)
tensorlike_rets = [r for r in func.returns if r.type.is_tensor_like()]
assert len(tensorlike_rets) > 0
used_annotations = concatMap(
lambda a: [] if a.annotation is None else a.annotation.alias_set,
func.arguments.flat_all,
)
valid_annotations = [
x for x in "abcdefghijklmnopqrstuvwxyz" if x not in used_annotations
]
all_rets_are_tensors = all(r.type == BaseType(BaseTy.Tensor) for r in func.returns)
new_out_args: List[Argument] = []
# The end result of new_returns is that:
# - If every return is a plain tensor, then the new returns == the old returns, but with the out= alias annotations added.
# - Otherwise, none of the out arguments show up in the returns (and we're only left with non-tensor-like returns, if any).
new_returns: List[Return] = []
for (i, r) in enumerate(func.returns):
if r.type.is_tensor_like():
new_out = Argument(
name="out" if len(func.returns) == 1 else f"out{i}",
type=r.type,
default=None,
annotation=Annotation.parse(f"{valid_annotations[i]}!"),
)
new_out_args.append(new_out)
if all_rets_are_tensors:
# The convention for out= schemas is that they only return their out arguments
# if the return is a plain Tensor (or if it's a tuple of plain Tensors)
new_ret = Return(
name=None, type=new_out.type, annotation=new_out.annotation
)
new_returns.append(new_ret)
else:
new_returns.append(r)
return new_returns, new_out_args
# Helper function: given a mutable FunctionSchema, generate its corresponding out= variant
# Example before:
# _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) # noqa: B950
# Example after:
# _fused_moving_avg_obs_fq_helper._out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!)) # noqa: B950
def mutable_to_out_signature(func: FunctionSchema) -> FunctionSchema:
# Generating an out= schema from a mutable schema.
assert func.kind() == SchemaKind.mutable
# The new out= schema has:
# - Any non-aliased tensor-like returns are converted to mutable, aliased out= arguments
# (if the argument is a tensor then we also return it for method chaining,
# otherwise we return nothing)
# - an "out" overload name
#
# Note that:
# (1) This also means that we can *only* generate an out= variant from a mutable schema
# if the mutable schema has at least one tensor-like non-aliasing return.
# (2) The generated out= variant still has mutable positional arguments,
# but if necessary we could probably add another out= variant that also
# functionalizes the mutable arguments (a functional_out variant)
new_returns, new_out_args = generate_out_args_from_schema(func)
return FunctionSchema(
name=func.name.remove_inplace().with_overload(
get_expected_out_variant_overload_name(func.name.overload_name)
),
arguments=func.arguments.with_out_args(new_out_args),
returns=tuple(new_returns),
)
# This function, given function of one SchemaKind, as well as a target SchemaKind,
# generates a new NativeFunction with the same properties, but using the target SchemaKind.
# We only actually generate functions for either functional or out= SchemaKinds.
# This function returns a tuple, with:
# - The generated NativeFunction
# - a dictionary of `BackendIndex` objects, describing which dispatch keys
# we will generate kernels for, for the new NativeFunction.
# Details are in the function, but we only generate composite kernels (in some cases) today.
def generate_function(
f: NativeFunction, k: SchemaKind
) -> Tuple[NativeFunction, Dict[DispatchKey, Dict["OperatorName", "BackendMetadata"]]]:
from torchgen.api import cpp
if k == SchemaKind.functional:
assert f.func.kind() != SchemaKind.functional
# The new "functional" NativeFunction has:
# - any mutable arguments have been converted into (immutable) returns.
# (if a mutable argument was not also a return, it gets converted to one)
# - "_functional" appended to the base name, ONLY IF this op has a mutable variant.
# See Note [Overload Ambiguity With Functional Variants]
# The default grouping logic in signature() actually already does this,
# so we can piggy-back off it (but we still want return names)
func = f.func.signature(keep_return_names=True).with_name(
OperatorName(
name=BaseOperatorName(
base=f.func.name.name.base,
inplace=False,
dunder_method=f.func.name.name.dunder_method,
# See Note [Overload Ambiguity With Functional Variants]
functional_overload=f.func.kind() == SchemaKind.mutable,
),
overload_name=f.func.name.overload_name,
)
)
elif k == SchemaKind.out:
# We generate out= ops mostly just so that we can pair up NativeFunctions into groups easily,
# but at least today, there is no good reason to actually use them.
# we'll generate a dispatcher entry for them, but won't actually register any kernels for them.
if f.func.kind() == SchemaKind.inplace:
func = self_to_out_signature(f.func)
elif f.func.kind() == SchemaKind.mutable:
func = mutable_to_out_signature(f.func)
elif f.func.kind() == SchemaKind.functional:
func = functional_to_out_signature(f.func)
else:
raise AssertionError(
"We only bother generating out= functions from either inplace or mutable or functional variants"
)
else:
raise AssertionError(
"We currently only generate either functional or out= NativeFunctions"
)
# Generated kernel naming convention for out: <op_name>_<overload_name>. The reason for this is to
# disambiguate operators with the same name but different overload names, e.g., `randn.names_out` and
# `randn.generator_with_names_out`.
kernel_name = (
func.name.unambiguous_name()
if func.kind() == SchemaKind.out
else cpp.name(func)
)
backend_metadata = {
DispatchKey.CompositeExplicitAutograd: {
func.name: BackendMetadata(
kernel=kernel_name,
structured=False,
cpp_namespace=DEFAULT_KERNEL_NAMESPACE,
)
}
}
return (
NativeFunction(
func=func,
use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors,
# These generated fn's aren't meant to be user friendly- don't generate methods.
variants=set([Variant.function]),
structured=False,
structured_delegate=None,
structured_inherits=None,
precomputed=None,
autogen=[],
ufunc_inner_loop={},
manual_kernel_registration=False,
manual_cpp_binding=False,
python_module=None,
category_override=None,
device_guard=False,
device_check=DeviceCheckType.NoCheck,
loc=f.loc,
cpp_no_default_args=set(),
is_abstract=f.is_abstract,
has_composite_implicit_autograd_kernel=False,
has_composite_explicit_autograd_kernel=True,
has_composite_explicit_autograd_non_functional_kernel=False,
# Every generated NativeFunction gets a "generated" tag, so it's easy to tell
# which NativeFunction objects did not come directly from native_functions.yaml.
tags=set(["generated"]),
namespace=f.namespace,
),
backend_metadata,
)
# This function is responsible for adding generated NativeFunctions which don't appear
# explicitly in the codegen.
# You can inspect the full list of NativeFunctions yourself with the torchgen package, by running
# torchgen.parse_native_yaml("aten/src/ATen/native/native_functions.yaml", "aten/src/ATen/native/tags.yaml")
# (Maybe we should make a friendly API for this)
#
# Note: this function *mutates* its two inputs,
# adding the new NativeFunctions / BackendMetadata to them
def add_generated_native_functions(
rs: List[NativeFunction],
indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]],
) -> None:
    # The main code for generating new NativeFunctions
    # First we group NativeFunctions by schema kind,
# then we detect which ones are missing and generate them.
pre_grouped_native_functions = pre_group_native_functions(rs)
for k, d in pre_grouped_native_functions.items():
has_functional = SchemaKind.functional in d
has_inplace = SchemaKind.inplace in d
has_mutable = SchemaKind.mutable in d
has_out = SchemaKind.out in d
# We automatically generate a few native functions that don't exist in the yaml, for a few reasons:
# (1) If an operator has an inplace/out= variant but no functional variant, we can generate
# a simple functional variant that the functionalization pass can consume.
# (2) If an operator has an inplace or functional but no out= variant, we generate an out=
# variant, mostly so we can easily pair up functions into NativeFunctionsGroup,
# while maintaining the constraint that the out= variant is "required".
if has_mutable or has_inplace or has_out or has_functional:
            # Don't bother generating function trios for native functions that bypass the dispatcher.
are_manual = all(f.manual_cpp_binding for f in d.values())
# Don't bother generating functional + out= variants for view operators
has_view_ops = any(f.is_view_op for f in d.values())
# Don't generate the other variants for CompositeImplicitAutograd operators.
# We could probably do this, but the main benefit of generating the function triplets
# is for transforms that need them, and transforms don't need to act directly
# on CompositeImplicitAutograd operators (since we let them decompose).
are_composite_implicit = all(
f.has_composite_implicit_autograd_kernel for f in d.values()
)
if are_manual or has_view_ops or are_composite_implicit:
continue
if has_out and len(d.values()) == 1:
# Note: [Out ops with functional variants that don't get grouped properly]
# In theory we could validly have an out= operator in native_functions.yaml
# that has no other variants.
# But today, all of the operators where that's the case actually do have
# functional variants, that we are just unable to pair up properly.
                # I think banning this altogether is probably safer
# (you can always add a functional variant yourself if you want to add a new out= operator).
#
# We should probably fix the existing cases; this check is to prevent us from adding more over time.
if (
str(d[SchemaKind.out].func.name)
not in OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY
):
raise AssertionError(
f"Found an out= operator that we could not find any other variants of: {str(d[SchemaKind.out].func)}"
)
continue
# Some inplace ops that have problematic schemas (that we should fix), which prevent us
# from generating out= and functional variants
if (
has_inplace
and str(d[SchemaKind.inplace].func.name)
in INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY
):
continue
base_fn = (
d[SchemaKind.inplace]
if has_inplace
else d[SchemaKind.mutable]
if has_mutable
else d[SchemaKind.out]
if has_out
else d[SchemaKind.functional]
)
# Note: [Mutable ops that cannot get an out variant]
# We can only generate an out= variant if either:
# - the original function has tensor-like returns (since we can convert them to out kwargs)
# - or it's inplace (since we can convert `self` to an out kwarg)
            # There are only two functions that don't fit this criterion today though,
            # and they both look like they should be fixed to be out= variants,
            # so it feels safer to ban this schema altogether
base_fn_valid = base_fn.func.kind() == SchemaKind.inplace or any(
r.type.is_tensor_like() for r in base_fn.func.returns
)
# Note: [Loosen the assertion that all functional should have out variant]
            # By design, all functional operators should have out= variants. The needs_out check
            # is loosening this requirement, changing it to only generate an out= variant if there's
            # an `autogen` block in the native function; in the long run it should be removed.
# FIXME: Remove this after figuring out CI job failures related to min, max, mean
needs_out = any("out" in str(op_name) for op_name in base_fn.autogen)
gets_out_variant = not has_out and base_fn_valid and needs_out
if not has_out and not base_fn_valid:
if (
str(base_fn.func.name)
not in MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
and str(base_fn.func.name)
not in FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
):
raise AssertionError(
f"""Found an operator that we could not generate an out= variant for: {str(base_fn.func)}.
These operators don't have a tensor-like return, making it difficult to generate a proper out= variant. If
an out= variant is not needed, please add the function name to the FUNCTIONAL_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT list."""
)
# Generate an out= variant
if gets_out_variant:
fn, metadata = generate_function(base_fn, SchemaKind.out)
d[SchemaKind.out] = fn
BackendIndex.grow_index(indices, metadata)
rs.append(fn)
# Generate a functional variant, but only do it if the operator got an out= variant
# (Functional variants are only useful if we can group up the variants,
# which we can only do if they have an out= variant)
if not has_functional and (has_out or gets_out_variant):
fn, metadata = generate_function(base_fn, SchemaKind.functional)
d[SchemaKind.functional] = fn
BackendIndex.grow_index(indices, metadata)
rs.append(fn)
def return_str(rets: Tuple[Return, ...], names: List[str]) -> str:
assert len(rets) == len(names)
if len(rets) == 0:
return ""
elif len(rets) == 1:
return f"return {names[0]};"
else:
return f"return {dispatcher.returns_type(rets).cpp_type()}({', '.join(names)});"
# Given a function and the name of a variable corresponding to the output of that function,
# gather up all of the individual returns that are not aliased
def gather_nonaliased_inner_rets(func: FunctionSchema, out_var: str) -> List[str]:
aliased_rets = func.aliased_return_names()
non_aliased_names = []
is_out_var_a_tuple = len(func.returns) > 1
for (i, r) in enumerate(aliased_rets):
if r is None:
non_aliased_names.append(
f"std::get<{i}>({out_var})" if is_out_var_a_tuple else out_var
)
return non_aliased_names
# Generates functional kernels in terms of their inplace/mutable counterparts.
# We only do this for "generated" NativeFunctions
@with_native_function
def gen_composite_functional_kernel(g: NativeFunctionsGroup) -> Optional[str]:
# We should only be generating these for code-generated NativeFunctions
if "generated" not in g.functional.tags:
return None
# And we always write the kernel for a generated op in terms of a non-generated op.
if g.inplace is not None and "generated" not in g.inplace.tags:
target_f = g.inplace
elif g.mutable is not None and "generated" not in g.mutable.tags:
target_f = g.mutable
else:
# We should be guaranteed to have a valid inplace/mutable variant to call into.
# See Note: [Mutable Ops Not Using Functionalization]
raise AssertionError(str(g.functional.func))
sig = DispatcherSignature(g.functional.func)
target_sig = DispatcherSignature(target_f.func)
context: List[Union[Binding, Expr]] = []
clone_mutable_inputs = []
cloned_return_names = []
# We can't just directly pass all of the arguments from the functional op into the mutating op.
# We need to check for which inputs to the mutating operator are mutable,
# and clone those inputs first.
for a_curr, a_tgt in zip(
dispatcher.jit_arguments(g.functional.func),
dispatcher.jit_arguments(target_f.func),
):
if a_tgt.annotation is not None and a_tgt.annotation.is_write:
clone_mutable_inputs.append(
f"auto {a_curr.name}_clone = clone_arg({a_curr.name});"
)
context.append(
Expr(
expr=f"{a_curr.name}_clone",
type=dispatcher.argument_type(a_curr, binds=a_curr.name),
)
)
# Invariant: mutable arguments on the inner mutable op are always returns on the functional op.
cloned_return_names.append(f"{a_curr.name}_clone")
else:
context.append(dispatcher.argument(a_curr))
exprs = ", ".join([e.expr for e in translate(context, target_sig.arguments())])
out_name = "output"
maybe_assign = f"auto {out_name} = " if len(target_f.func.returns) > 0 else ""
inner_return_names = gather_nonaliased_inner_rets(target_f.func, out_name)
ret_str = return_str(
g.functional.func.returns, inner_return_names + cloned_return_names
)
clone_mutable_inputs_str = "\n".join(clone_mutable_inputs)
return f"""
{sig.defn()} {{
{clone_mutable_inputs_str}
{maybe_assign}at::_ops::{target_f.func.name.unambiguous_name()}::call({exprs});
{ret_str}
}}
"""
# Generates out= kernels in terms of their functional counterparts.
# We only do this for "generated" NativeFunctions
@with_native_function
def gen_composite_out_kernel(g: NativeFunctionsGroup) -> Optional[str]:
# We should only be generating these for code-generated NativeFunctions
if "generated" not in g.out.tags:
return None
# And we always write the kernel for the out= op in terms of the functional.
# Note that the functional op might have also been generated, but we don't have to
# worry about cycles, because the generated functional kernels are always implemented
# in terms of non-generated kernels (see gen_composite_functional_kernel).
sig = DispatcherSignature(g.out.func)
target_sig = DispatcherSignature(g.functional.func)
exprs = ", ".join(
[e.expr for e in translate(sig.arguments(), target_sig.arguments())]
)
copy_outs = []
out_name = "tmp_output"
for i, out_arg in enumerate(g.out.func.arguments.out):
functional_return_name = (
out_name
if len(g.functional.func.returns) == 1
else f"std::get<{i}>({out_name})"
)
copy_outs.append(
f"""\
resize_out_helper({out_arg.name}, {functional_return_name});
copy_arg({out_arg.name}, {functional_return_name});"""
)
rets = []
# For each return arg in the calling (out=) operator,
# If it corresponds to an aliased input, return the input.
# Otherwise, return the corresponding output from calling the functional operator.
for i, ret_name in enumerate(g.out.func.aliased_return_names()):
if ret_name is not None:
rets.append(ret_name)
else:
functional_return_name = (
out_name
if len(g.functional.func.returns) == 1
else f"std::get<{i}>({out_name})"
)
rets.append(functional_return_name)
copy_outs_str = "\n".join(copy_outs)
# Kernel name needs to follow the naming convention defined in `generate_function()`
return f"""
{sig.defn(name=g.out.func.name.unambiguous_name())} {{
auto {out_name} = at::_ops::{g.functional.func.name.unambiguous_name()}::call({exprs});
{copy_outs_str}
{return_str(g.out.func.returns, rets)}
}}
"""
| pytorch-master | torchgen/native_function_generation.py |
import textwrap
from dataclasses import dataclass
from typing import List, Optional, Sequence, Tuple
from torchgen.api.translate import translate
from torchgen.api.types import DispatcherSignature
from torchgen.context import method_with_native_function
from torchgen.model import (
Argument,
BaseTy,
BaseType,
FunctionSchema,
ListType,
NativeFunction,
OptionalType,
Return,
SchemaKind,
Type,
)
from torchgen.utils import mapMaybe
def is_tensor(typ: Type) -> bool:
return isinstance(typ, BaseType) and typ.name == BaseTy.Tensor
def is_optional_tensor(typ: Type) -> bool:
return isinstance(typ, OptionalType) and is_tensor(typ.elem)
def is_tensor_list(typ: Type) -> bool:
return isinstance(typ, ListType) and is_tensor(typ.elem)
def unwrap_tensor(name: str, cur_level_var: str) -> List[str]:
result = f"""\
Tensor {name}_value;
optional<int64_t> {name}_bdim;
std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}, {cur_level_var});"""
return textwrap.dedent(result).split("\n")
def unwrap_optional_tensor(name: str, cur_level_var: str) -> List[str]:
result = f"""\
optional<Tensor> {name}_value;
optional<int64_t> {name}_bdim;
if ({name}) {{
std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}.value(), {cur_level_var});
}}"""
return textwrap.dedent(result).split("\n")
def gen_unwraps(
flat_arguments: Sequence[Argument], cur_level_var: str
) -> Tuple[str, List[str]]:
arg_names = [a.name for a in flat_arguments]
arg_types = [a.type for a in flat_arguments]
tensors = [name for typ, name in zip(arg_types, arg_names) if is_tensor(typ)]
optional_tensors = [
name for typ, name in zip(arg_types, arg_names) if is_optional_tensor(typ)
]
unwraps = []
for tensor in tensors:
unwraps += unwrap_tensor(tensor, cur_level_var)
for opt_tensor in optional_tensors:
unwraps += unwrap_optional_tensor(opt_tensor, cur_level_var)
unwrap_code = "\n".join(unwraps)
unwrapped_arg_list = []
for arg in arg_names:
if arg in tensors or arg in optional_tensors:
unwrapped_arg_list += [f"{arg}_value", f"{arg}_bdim"]
else:
unwrapped_arg_list.append(arg)
return unwrap_code, unwrapped_arg_list
def gen_case_where_all_bdims_are_none(
schema: FunctionSchema, cur_level_var: str
) -> str:
conditions = []
flat_args = schema.arguments.flat_all
for arg in flat_args:
if not arg.type.is_tensor_like():
continue
conditions.append(f"!isBatchedAtLevel({arg.name}, {cur_level_var})")
sig = DispatcherSignature.from_schema(schema)
translated_args = ", ".join(
e.expr for e in translate(sig.arguments(), sig.arguments())
)
return f"""\
if ({' && '.join(conditions)}) {{
return at::_ops::{sig.func.name.unambiguous_name()}::call({translated_args});
}}"""
def gen_returns(
returns: Tuple[Return, ...], cur_level_var: str, results_var: str
) -> str:
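    # The batch rule returns tensor-like outputs as interleaved (value, bdim)
    # pairs, so each tensor/tensor-list return consumes two slots of the
    # results tuple while any other return consumes one.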
idx = 0
wrapped_returns = []
for ret in returns:
if is_tensor(ret.type):
wrapped_returns.append(
f"makeBatched(std::get<{idx}>({results_var}), std::get<{idx + 1}>({results_var}), {cur_level_var})"
)
idx += 2
elif is_tensor_list(ret.type):
wrapped_returns.append(
f"makeBatchedVector(std::get<{idx}>({results_var}), std::get<{idx+1}>({results_var}), {cur_level_var})"
)
idx += 2
else:
wrapped_returns.append(f"std::get<{idx}>({results_var})")
idx += 1
if len(wrapped_returns) == 1:
result = f"return {wrapped_returns[0]};"
else:
result = f'return std::make_tuple({", ".join(wrapped_returns)});'
return result
def accepts_at_least_one_tensor_input(schema: FunctionSchema) -> bool:
return any(a.type.is_tensor_like() for a in schema.arguments.flat_all)
def is_mutated_arg(argument: Argument) -> bool:
return argument.annotation is not None and argument.annotation.is_write
def gen_vmap_inplace_plumbing(native_function: NativeFunction) -> Optional[str]:
# Assumptions:
# - only one argument is being modified in-place
# - the argument that is being modified in-place is the first argument
# - all returns are either Tensor, tuple of Tensor, or TensorList
schema = native_function.func
sig = DispatcherSignature.from_schema(schema)
returns = schema.returns
# Check assumptions. If these are invalid we return None
    # and punt handling them to the future.
assert schema.kind() == SchemaKind.inplace
if not is_mutated_arg(schema.arguments.flat_all[0]):
return None
if not len([arg for arg in schema.arguments.flat_all if is_mutated_arg(arg)]) == 1:
return None
# Only support cases where all returns are Tensors or vector<Tensor>
if len(returns) == 0:
return None
if not all(is_tensor(ret.type) or is_tensor_list(ret.type) for ret in returns):
return None
if not accepts_at_least_one_tensor_input(schema):
return None
cur_level_var = "cur_level"
unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
bdims_all_none_case = gen_case_where_all_bdims_are_none(schema, cur_level_var)
return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
c10::impl::ExcludeDispatchKeyGuard guard(kBatchedKey);
auto maybe_layer = maybeCurrentDynamicLayer();
TORCH_INTERNAL_ASSERT(maybe_layer.has_value());
int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, " ")}
{textwrap.indent(unwraps, " ")}
batch_rule({', '.join(unwrapped_arg_list)});
return {schema.arguments.flat_all[0].name};
}}"""
def gen_vmap_plumbing_no_returns(native_function: NativeFunction) -> str:
schema = native_function.func
sig = DispatcherSignature.from_schema(schema)
cur_level_var = "cur_level"
unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
bdims_all_none_case = gen_case_where_all_bdims_are_none(schema, cur_level_var)
return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
c10::impl::ExcludeDispatchKeyGuard guard(kBatchedKey);
auto maybe_layer = maybeCurrentDynamicLayer();
TORCH_INTERNAL_ASSERT(maybe_layer.has_value());
int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, " ")}
{textwrap.indent(unwraps, " ")}
batch_rule({', '.join(unwrapped_arg_list)});
}}"""
def gen_vmap_plumbing(native_function: NativeFunction) -> Optional[str]:
schema = native_function.func
sig = DispatcherSignature.from_schema(schema)
returns = schema.returns
# Only support cases where all returns are Tensors or vector<Tensor>
if not accepts_at_least_one_tensor_input(schema):
return None
if len(returns) == 0:
return gen_vmap_plumbing_no_returns(native_function)
if not all(ret.type.is_tensor_like() for ret in returns):
return None
# in-place views need special handling
if "inplace_view" in native_function.tags:
return None
if schema.kind() == SchemaKind.inplace:
return gen_vmap_inplace_plumbing(native_function)
# Don't support these (mutable, out, scratch)
if schema.kind() != SchemaKind.functional:
return None
results_var = "results"
cur_level_var = "cur_level"
unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all, cur_level_var)
bdims_all_none_case = gen_case_where_all_bdims_are_none(schema, cur_level_var)
wrapped_returns = gen_returns(returns, cur_level_var, results_var)
return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
c10::impl::ExcludeDispatchKeyGuard guard(kBatchedKey);
auto maybe_layer = maybeCurrentDynamicLayer();
TORCH_INTERNAL_ASSERT(maybe_layer.has_value());
int64_t {cur_level_var} = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, " ")}
{textwrap.indent(unwraps, " ")}
auto {results_var} = batch_rule({', '.join(unwrapped_arg_list)});
{wrapped_returns}
}}"""
@dataclass(frozen=True)
class ComputeBatchRulePlumbing:
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
        return gen_vmap_plumbing(f)
def gen_all_vmap_plumbing(native_functions: Sequence[NativeFunction]) -> str:
body = "\n".join(list(mapMaybe(ComputeBatchRulePlumbing(), native_functions)))
return f"""
#pragma once
#include <ATen/Operators.h>
#include <functorch/csrc/PlumbingHelper.h>
#include <functorch/csrc/Constants.h>
namespace at {{ namespace functorch {{
{body}
}}}} // namespace at::functorch
"""
| pytorch-master | torchgen/gen_vmap_plumbing.py |
import argparse
import functools
import json
import os
import pathlib
from collections import defaultdict, namedtuple, OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, TypeVar, Union
import yaml
from typing_extensions import Literal
import torchgen.api.dispatcher as dispatcher
import torchgen.api.meta as meta
import torchgen.api.native as native
import torchgen.api.structured as structured
import torchgen.dest as dest
from torchgen.api import cpp
from torchgen.api.translate import translate
from torchgen.api.types import (
Binding,
CppSignatureGroup,
DispatcherSignature,
NamedCType,
NativeSignature,
SpecialArgName,
)
from torchgen.context import (
method_with_native_function,
native_function_manager,
with_native_function,
with_native_function_and_indices,
)
from torchgen.gen_functionalization_type import (
gen_composite_view_copy_kernel,
gen_functionalization_definition,
gen_functionalization_registration,
gen_functionalization_view_inverse_declaration,
gen_symint_view_copy_kernel,
)
from torchgen.gen_vmap_plumbing import gen_all_vmap_plumbing
from torchgen.model import (
Argument,
BackendIndex,
BackendMetadata,
BaseOperatorName,
DEFAULT_KERNEL_NAMESPACE,
DispatchKey,
FunctionSchema,
is_cuda_dispatch_key,
is_generic_dispatch_key,
is_ufunc_dispatch_key,
Location,
NativeFunction,
NativeFunctionsGroup,
NativeFunctionsViewGroup,
OperatorName,
OptionalType,
SchemaKind,
SelfArgument,
STRUCTURED_DISPATCH_KEYS,
TensorOptionsArguments,
Type,
Variant,
ViewSchemaKind,
)
from torchgen.native_function_generation import (
add_generated_native_functions,
gen_composite_functional_kernel,
gen_composite_out_kernel,
pre_group_native_functions,
)
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import (
assert_never,
concatMap,
context,
FileManager,
make_file_manager,
mapMaybe,
NamespaceHelper,
Target,
YamlDumper,
YamlLoader,
)
T = TypeVar("T")
# Welcome to the ATen code generator v2! The ATen code generator is
# responsible for parsing native_functions.yaml and then generating
# various generated files (e.g., TypeDefault.cpp) based on the operators
# defined in this file. This means that the code generator knows how to
# parse function schema, and then translate this into various C++ types
# and boilerplate code.
#
# Some things to know about this file when you modify it:
#
# - This file has STRICT mypy typechecking. Typecheck it with
# `mypy --config mypy-strict.ini` in the root source directory
#
# - Most of the heavy lifting lives in external modules:
# - 'model' has the data model for native_functions.yaml. The classes
# in that file represent what you see when you look at
# a native_functions.yaml
# - 'api' has conversions for how to translate JIT schema into
# the various C++ APIs that the codegen interacts with. There
# are in fact THREE different C++ APIs: the public C++ API,
# the dispatcher API, and the legacy dispatcher API. See each
# of these respective files for more information
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# HELPER FUNCTIONS
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# A custom loader for YAML to let us also keep track of line numbers
# of each entry in the YAML file
class LineLoader(YamlLoader):
def construct_mapping(self, node, deep=False): # type: ignore[no-untyped-def]
mapping = super().construct_mapping(node, deep=deep) # type: ignore[no-untyped-call]
# Add 1 so line numbering starts at 1
mapping["__line__"] = node.start_mark.line + 1
return mapping
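# A small illustrative example of what LineLoader produces (not part of the
# codegen itself): loading
#   yaml.load("- func: foo\n- func: bar\n", Loader=LineLoader)
# yields
#   [{"func": "foo", "__line__": 1}, {"func": "bar", "__line__": 2}]
# so errors reported below can point back at the exact line in the yaml file.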
_GLOBAL_PARSE_NATIVE_YAML_CACHE = {}
_GLOBAL_PARSE_TAGS_YAML_CACHE = {}
# Parse native_functions.yaml into a sequence of NativeFunctions and Backend Indices.
ParsedYaml = namedtuple("ParsedYaml", ["native_functions", "backend_indices"])
def parse_native_yaml_struct(
es: object,
valid_tags: Set[str],
ignore_keys: Optional[Set[DispatchKey]] = None,
path: str = "<stdin>",
skip_native_fns_gen: bool = False,
) -> ParsedYaml:
assert isinstance(es, list)
rs: List[NativeFunction] = []
bs: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]] = defaultdict(dict)
for e in es:
assert isinstance(e.get("__line__"), int), e
loc = Location(path, e["__line__"])
funcs = e.get("func")
with context(lambda: f"in {loc}:\n {funcs}"):
func, m = NativeFunction.from_yaml(e, loc, valid_tags, ignore_keys)
rs.append(func)
BackendIndex.grow_index(bs, m)
error_check_native_functions(rs)
# Default dict is to prevent the codegen from barfing when we have a dispatch key that has no kernels yet.
indices: Dict[DispatchKey, BackendIndex] = defaultdict(
lambda: BackendIndex(
dispatch_key=DispatchKey.Undefined,
use_out_as_primary=True,
external=False,
device_guard=False,
index={},
)
)
if not skip_native_fns_gen:
add_generated_native_functions(rs, bs)
for k, v in bs.items():
# All structured in-tree operators are implemented in terms of their out operator.
indices[k] = BackendIndex(
dispatch_key=k,
use_out_as_primary=True,
external=False,
# Only cuda-like devices in tree require device guards
device_guard=is_cuda_dispatch_key(k),
index=v,
)
return ParsedYaml(rs, indices)
def parse_tags_yaml_struct(es: object, path: str = "<stdin>") -> Set[str]:
assert isinstance(es, list)
rs: Set[str] = set()
for e in es:
assert isinstance(e.get("__line__"), int), e
loc = Location(path, e["__line__"])
tags = e.get("tag")
with context(lambda: f"in {loc}:\n {tags}"):
e_i = e.copy()
name = e_i.pop("tag")
desc = e_i.pop("desc", "")
# ensure that each tag has a non-empty description
assert desc != ""
rs.add(name)
return rs
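# Roughly, parse_tags_yaml_struct expects entries of the following shape
# (an illustrative sketch; the real tags.yaml defines many more tags):
#
#   - tag: inplace_view
#     desc: Operator is an in-place view operation.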
@functools.lru_cache(maxsize=None)
def parse_tags_yaml(path: str) -> Set[str]:
global _GLOBAL_PARSE_TAGS_YAML_CACHE
if path not in _GLOBAL_PARSE_TAGS_YAML_CACHE:
with open(path, "r") as f:
es = yaml.load(f, Loader=LineLoader)
_GLOBAL_PARSE_TAGS_YAML_CACHE[path] = parse_tags_yaml_struct(es, path=path)
return _GLOBAL_PARSE_TAGS_YAML_CACHE[path]
def parse_native_yaml(
path: str,
tags_yaml_path: str,
ignore_keys: Optional[Set[DispatchKey]] = None,
*,
skip_native_fns_gen: bool = False,
) -> ParsedYaml:
global _GLOBAL_PARSE_NATIVE_YAML_CACHE
if path not in _GLOBAL_PARSE_NATIVE_YAML_CACHE:
valid_tags = parse_tags_yaml(tags_yaml_path)
with open(path, "r") as f:
es = yaml.load(f, Loader=LineLoader)
_GLOBAL_PARSE_NATIVE_YAML_CACHE[path] = parse_native_yaml_struct(
es,
valid_tags,
ignore_keys,
path=path,
skip_native_fns_gen=skip_native_fns_gen,
)
return _GLOBAL_PARSE_NATIVE_YAML_CACHE[path]
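# For reference, a minimal sketch of the kind of native_functions.yaml entry
# that parse_native_yaml consumes (illustrative; real entries often carry more
# fields, e.g. dispatch tables):
#
#   - func: add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
#     variants: function, method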
# Some assertions are already performed during parsing, but those are only within a single NativeFunction.
# Assertions here are meant to be performed across NativeFunctions.
def error_check_native_functions(funcs: Sequence[NativeFunction]) -> None:
func_map: Dict[OperatorName, NativeFunction] = {}
base_func_map: Dict[BaseOperatorName, List[NativeFunction]] = defaultdict(list)
for f in funcs:
func_map[f.func.name] = f
base_func_map[f.func.name.name].append(f)
for f in funcs:
if f.structured_delegate is not None:
delegate_func = func_map[f.structured_delegate]
assert delegate_func.structured, (
f"{f.func.name} is marked as a structured_delegate pointing to "
f"{f.structured_delegate}, but {f.structured_delegate} is not marked as structured. "
f"Consider adding 'structured=True' to the delegated operator"
)
# See Note [resize_ in Functionalization]
# resize_() is technically an inplace view op (and therefore needs the tag),
# but it would be overkill to add a true "view" variant of resize.
# Instead, resize_() gets special treatment in functionalization,
# and we have a resize() op that is non-aliasing + functional.
if "inplace_view" in f.tags and str(f.func.name) != "resize_":
base_name = f.func.name.name
overload_name = f.func.name.overload_name
assert base_name.inplace, (
f"{f.func.name} is marked with tag: inplace_view, but it doesn't follow the naming "
"convention for inplace ops - the codegen expects the base name to have a trailing underscore. "
)
out_of_place_base_name = BaseOperatorName(
base_name.base, False, base_name.dunder_method
)
assert len(base_func_map[out_of_place_base_name]) > 0, (
f"{f.func.name} is marked with tag: inplace_view. The codegen expects there to be a corresponding "
f"out-of-place view op with the name '{base_name}' and matching schema, but it didn't find one. "
)
def cpp_string(s: str) -> str:
"""Convert a python string into a c++ string literal"""
s = s.replace("\\", "\\\\")
s = s.replace('"', '\\"')
s = s.replace("\a", "\\a")
s = s.replace("\b", "\\b")
s = s.replace("\f", "\\f")
s = s.replace("\n", "\\n")
s = s.replace("\v", "\\v")
s = s.replace("\t", "\\t")
return f'"{s}"'
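# For example (an illustrative sketch, not executed anywhere):
#   cpp_string('say "hi"\n')  ==  '"say \\"hi\\"\\n"'
# i.e. the emitted C++ token reads: "say \"hi\"\n"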
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# C++ CODE GENERATION
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# Most functions in this section are curried: they consist of a function
# that takes some parameters (e.g., what is to be generated) which itself
# returns a function that actually maps NativeFunction to the code
# to be generated. This pattern makes it convenient to use map, concatMap
# and similar functional combinators.
def static_dispatch_keys(backends: List[BackendIndex]) -> List[DispatchKey]:
if len(backends) == 0:
return []
else:
return [backend.dispatch_key for backend in backends] + [
DispatchKey.CompositeImplicitAutograd,
DispatchKey.CompositeExplicitAutograd,
DispatchKey.CompositeExplicitAutogradNonFunctional,
]
def get_static_dispatch_backend(
f: NativeFunction, backend_index: BackendIndex
) -> Optional[DispatchKey]:
if f.structured_delegate is not None or backend_index.has_kernel(f):
# TODO: for ops with structured_delegate it should check the dispatch table of
# the out variant instead. For now, these structured ops all have CPU/CUDA kernels
# so we always dispatch to the `backend`, but this could be wrong when we
# migrate math/default_backend ops to use structured delegate.
return backend_index.dispatch_key
elif f.has_composite_explicit_autograd_kernel:
return DispatchKey.CompositeExplicitAutograd
elif f.has_composite_explicit_autograd_non_functional_kernel:
return DispatchKey.CompositeExplicitAutogradNonFunctional
elif f.has_composite_implicit_autograd_kernel:
return DispatchKey.CompositeImplicitAutograd
return None
def static_dispatch_ops_header(
f: NativeFunction, backend_index: List[BackendIndex]
) -> Optional[str]:
if backend_index is None or f.manual_kernel_registration:
return None
output = []
for index in backend_index:
dispatch_key = get_static_dispatch_backend(f, index)
if dispatch_key is not None:
output.append(
f"#include <ATen/ops/{f.root_name}_{dispatch_key.lower()}_dispatch.h>"
)
return "\n".join(output)
def static_dispatch_extra_headers(backends: List[BackendIndex]) -> List[str]:
return [
f"#include <ATen/{dispatch_key}Functions.h>"
for dispatch_key in static_dispatch_keys(backends)
]
# Translates arguments of a native function from DispatcherSignature form to CppSignature form,
# including the case where a memory_format argument appears alongside tensor_options arguments.
# This use case is not covered by torchgen.api.translate() yet, as its application is limited to static dispatch.
def translate_args_dispatcher_to_cpp(
f: NativeFunction,
) -> str:
# Adds SpecialArgName.possibly_redundant_memory_format NamedCType for memory_format bindings
def add_spl_memory_format_binding(input_bindings: List[Binding]) -> List[Binding]:
output_bindings: List[Binding] = []
for binding in input_bindings:
if binding.name == "memory_format":
spl_mem_format_binding = Binding(
nctype=NamedCType(
SpecialArgName.possibly_redundant_memory_format,
binding.nctype.type,
),
name=binding.name,
default=binding.default,
argument=binding.argument,
)
output_bindings.append(spl_mem_format_binding)
else:
output_bindings.append(binding)
return output_bindings
disp_sig = DispatcherSignature.from_schema(f.func)
cpp_sig = CppSignatureGroup.from_native_function(
f, method=False, fallback_binding=False
).signature
disp_bindings = disp_sig.arguments()
    # When the last argument of the C++ signature has the SpecialArgName.possibly_redundant_memory_format NCType,
    # rebuild the dispatcher signature's bindings so its memory_format binding uses the same NCType as well.
for arg in cpp_sig.arguments():
if arg.nctype.name == SpecialArgName.possibly_redundant_memory_format:
disp_bindings = add_spl_memory_format_binding(disp_sig.arguments())
break
exprs = translate(disp_bindings, cpp_sig.arguments())
return ", ".join(a.expr for a in exprs)
def generate_static_dispatch_backend_call(
f: NativeFunction,
backend_index: BackendIndex,
) -> str:
name = DispatcherSignature.from_schema(f.func).name()
exprs = translate_args_dispatcher_to_cpp(f)
backend_metadata = backend_index.get_kernel(f)
kernel_ns = (
backend_metadata.cpp_namespace
if backend_metadata and backend_metadata.cpp_namespace
else DEFAULT_KERNEL_NAMESPACE
)
ns = kernel_ns.replace("::native", "")
return f"return {ns}::{backend_index.dispatch_key.lower()}::{name}({exprs});"
def generate_static_dispatch_fallback_call(
f: NativeFunction,
backend_indices: List[BackendIndex],
) -> str:
name = DispatcherSignature.from_schema(f.func).name()
exprs = translate_args_dispatcher_to_cpp(f)
ns = DEFAULT_KERNEL_NAMESPACE.replace("::native", "")
if f.has_composite_explicit_autograd_kernel:
return f"return {ns}::{DispatchKey.CompositeExplicitAutograd.lower()}::{name}({exprs});"
elif f.has_composite_explicit_autograd_non_functional_kernel:
return f"return {ns}::{DispatchKey.CompositeExplicitAutogradNonFunctional.lower()}::{name}({exprs});"
elif f.has_composite_implicit_autograd_kernel:
return f"return {ns}::{DispatchKey.CompositeImplicitAutograd.lower()}::{name}({exprs});"
else:
return f"""TORCH_CHECK(false, "Static dispatch does not support {name} for\
{', '.join([str(index.dispatch_key) for index in backend_indices])} ");"""
def static_dispatch(
f: NativeFunction,
backend_indices: List[BackendIndex],
) -> str:
if len(backend_indices) == 0 or f.manual_kernel_registration:
return ""
keys = [
b
for b in backend_indices
if b.has_kernel(f)
or (
f.structured_delegate is not None
and b.dispatch_key in STRUCTURED_DISPATCH_KEYS
)
]
if len(keys) == 1:
return generate_static_dispatch_backend_call(f, keys[0])
elif len(keys) == 0:
return generate_static_dispatch_fallback_call(f, backend_indices)
sig = DispatcherSignature.from_schema(f.func)
native_tensor_args = [
a.name
for a in sig.arguments()
if isinstance(a.argument, SelfArgument)
or isinstance(a.argument, Argument)
and a.argument.type.is_tensor_like()
]
tensor_args = ", ".join(native_tensor_args)
tensor_opts = f.func.arguments.tensor_options
stmts = []
subexprs: List[str] = []
if tensor_opts is not None:
subexprs.append(
"DispatchKeySet(c10::computeDispatchKey(dtype, layout, device))"
)
if tensor_args != "":
subexprs.append(f"c10::detail::multi_dispatch_key_set({tensor_args})")
stmts.append(f"""DispatchKeySet _dk_set = {' | '.join(subexprs)};""")
stmts.append("DispatchKey _dk = c10::highestPriorityBackendTypeId(_dk_set);")
dispatch_code = []
for index in keys:
dispatch_code.append(f"""case DispatchKey::{index.dispatch_key}:""")
dispatch_code.append(
f"""\t{generate_static_dispatch_backend_call(f, index)};"""
)
fallback = generate_static_dispatch_fallback_call(f, backend_indices)
connector = "\n\t\t"
return f"""
{connector.join(stmts)}
switch (_dk) {{
{connector.join(dispatch_code)}
default:
{fallback}
}}
"""
# Generates RegisterSchema.cpp. Depending on the selector, either
# all schemas are registered, or only some are (in the case of
# selective build)
@dataclass(frozen=True)
class RegisterSchema:
selector: SelectiveBuilder
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
if not self.selector.is_native_function_selected(f):
return None
tags = "{" + ", ".join([f"at::Tag::{tag}" for tag in f.tags]) + "}"
return f"m.def({cpp_string(str(f.func))}, {tags});\n"
# Generates Operators.h and Operators.cpp.
# These provide macros that, given an operator and overload name, allow users
# to access an "un-overloaded" function version of the operator. This
# is useful for extension writers who (1) want to decltype the operator
# and (2) don't want to worry about method-only operators.
@dataclass(frozen=True)
class ComputeOperators:
target: Union[Literal[Target.DECLARATION], Literal[Target.DEFINITION]]
static_dispatch_backend_indices: List[BackendIndex]
@method_with_native_function
def __call__(self, f: NativeFunction) -> str:
sig = DispatcherSignature.from_schema(f.func)
name = f.func.name.unambiguous_name()
call_method_name = "call"
redispatch_method_name = "redispatch"
if self.target is Target.DECLARATION:
# Note [The ATen Operators API]
# The ATen Operators API lives in the at::_ops namespace, and contains compile-time
# metadata about each operator + entry points into the Dispatcher.
# The C++ function, method, and redispatch API's are all implemented as wrappers
# into various bits of the structs defined here.
#
# Important characteristics about the Operators API:
# (1) It follows the Dispatcher API.
# This is kind of necessary to avoid overhead.
# For example: if it followed the C++ API, then all of the faithful C++ factory functions
# would need to wrap their arguments into TensorOptions only to unwrap them again.
# (2) Overload names are disambiguated.
# This is helpful for pytorch extenders who would like to decltype() an aten operator,
# that has overloads, e.g. decltype(at::_ops::mul_Tensor::call)
# (3) No argument defaulting is allowed.
# This is more of an implementation detail to avoid #include cycles,
# since TensorBody.h (which defines the Tensor class) needs to include this file.
# (4) manual_cpp_bindings and faithful names are not included in the API.
# This applies to stuff like __dispatch__is_complex(), and add_outf().
# These aren't "real aten ops", they're just additional functions provided by the C++ API.
# They're implemented as wrappers in Functions.h that call into the actual operators
# defined here, i.e. at::_ops::is_complex::call() and at::_ops::add_out::call().
# This means that ATEN_OP(is_complex) will not fastpath, and will go through the dispatcher.
return f"""
struct TORCH_API {name} {{
using schema = {sig.type()};
using ptr_schema = schema*;
// See Note [static constexpr char* members for windows NVCC]
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::{f.func.name.name}")
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "{f.func.name.overload_name}")
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, {cpp_string(str(f.func))})
static {sig.defn(name=call_method_name, is_redispatching_fn=False)};
static {sig.defn(name=redispatch_method_name, is_redispatching_fn=True)};
}};"""
elif self.target is Target.DEFINITION:
defns = f"""
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, name, "aten::{f.func.name.name}")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, overload_name, "{f.func.name.overload_name}")
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA({name}, schema_str, {cpp_string(str(f.func))})
// aten::{f.func}
static C10_NOINLINE c10::TypedOperatorHandle<{name}::schema> create_{name}_typed_handle() {{
return c10::Dispatcher::singleton()
.findSchemaOrThrow({name}::name, {name}::overload_name)
.typed<{name}::schema>();
}}
"""
for is_redispatching_fn in [False, True]:
if is_redispatching_fn:
dispatcher_exprs_str = ", ".join(
["dispatchKeySet"] + [a.name for a in sig.arguments()]
)
dispatcher_call = "redispatch"
method_name = f"{name}::{redispatch_method_name}"
else:
method_name = f"{name}::{call_method_name}"
dispatcher_exprs_str = ", ".join([a.name for a in sig.arguments()])
dispatcher_call = "call"
fn_body = f"""
static auto op = create_{name}_typed_handle();
return op.{dispatcher_call}({dispatcher_exprs_str});"""
if (
not is_redispatching_fn
and len(self.static_dispatch_backend_indices) > 0
):
# call() should go through static dispatch
fn_body = static_dispatch(
f, backend_indices=self.static_dispatch_backend_indices
)
defns += f"""
// aten::{f.func}
{sig.defn(name=method_name, is_redispatching_fn=is_redispatching_fn)} {{
{fn_body}
}}
"""
return defns
else:
assert_never(self.target)
# Generates Functions.h, which provides the functional public C++ API,
# and the scaffolding to call into the dispatcher from these functions.
@dataclass(frozen=True)
class ComputeFunction:
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
if Variant.function not in f.variants:
return None
sig_group = CppSignatureGroup.from_native_function(
f, method=False, fallback_binding=f.manual_cpp_binding
)
def generate_defn(faithful: bool) -> str:
if faithful:
sig = sig_group.faithful_signature
assert sig is not None
else:
sig = sig_group.signature
# See Note [The ATen Operators API]
target_sig = DispatcherSignature.from_schema(f.func)
exprs = translate(sig.arguments(), target_sig.arguments())
exprs_str = ", ".join([e.expr for e in exprs])
return f"""
// aten::{f.func}
inline {sig.decl()} {{
return at::_ops::{f.func.name.unambiguous_name()}::call({exprs_str});
}}
"""
result = generate_defn(False)
if sig_group.faithful_signature is not None:
result += generate_defn(True)
return result
# Generates TensorBody.h. This file provides the object-oriented (method-based)
# public C++ API, and the scaffolding to call into the dispatcher from these functions.
@dataclass(frozen=True)
class ComputeTensorMethod:
target: Union[Literal[Target.DECLARATION], Literal[Target.DEFINITION]]
static_dispatch_backend_indices: List[BackendIndex]
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
if Variant.method not in f.variants:
return None
assert not f.func.is_out_fn()
assert f.func.arguments.self_arg is not None
sig_group = CppSignatureGroup.from_native_function(
f, method=True, fallback_binding=f.manual_cpp_binding
)
if self.target is Target.DECLARATION:
result = f"{sig_group.signature.decl()} const;\n"
if sig_group.faithful_signature is not None:
result += f"{sig_group.faithful_signature.decl()} const;\n"
return result
if self.target is not Target.DEFINITION:
assert_never(self.target)
def generate_defn(faithful: bool) -> str:
if faithful:
sig = sig_group.faithful_signature
assert sig is not None
else:
sig = sig_group.signature
target_sig = DispatcherSignature.from_schema(f.func)
exprs = translate(sig.arguments(), target_sig.arguments(), method=True)
exprs_str = ", ".join([e.expr for e in exprs])
return f"""
// aten::{f.func}
inline {sig.defn(prefix="Tensor::")} const {{
return at::_ops::{f.func.name.unambiguous_name()}::call({exprs_str});
}}
"""
result = generate_defn(faithful=False)
if sig_group.faithful_signature is not None:
result += generate_defn(faithful=True)
return result
# Generates RedispatchFunctions.h.
# This is similar to the C++ API defined in Functions.h, but provides access
# to the dispatcher's redispatch API.
@dataclass(frozen=True)
class ComputeRedispatchFunction:
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
# We unconditionally generate function variants of the redispatch API.
        # This is mainly because we can namespace functions separately, but not methods.
sig_group = CppSignatureGroup.from_native_function(
f, method=False, fallback_binding=f.manual_cpp_binding
)
def generate_defn(faithful: bool) -> str:
if faithful:
sig = sig_group.faithful_signature
assert sig is not None
else:
sig = sig_group.signature
target_sig = DispatcherSignature.from_schema(f.func)
exprs = translate(sig.arguments(), target_sig.arguments())
exprs_str = ", ".join(["dispatchKeySet"] + [a.expr for a in exprs])
return f"""
// aten::{f.func}
inline {sig.decl(is_redispatching_fn=True)} {{
return at::_ops::{f.func.name.unambiguous_name()}::redispatch({exprs_str});
}}
"""
result = generate_defn(False)
if sig_group.faithful_signature is not None:
result += generate_defn(True)
return result
# Generates ATenOpList.cpp, a runtime accessible list of all aten
# operators.
# TODO: This was historically used to help some JIT interop code
# figure out whether or not to treat aten namespace'd operators
# one way or another; we should reevaluate whether this is actually needed.
@with_native_function
def compute_aten_op(f: NativeFunction) -> str:
return f'{{"aten::{f.func.name.name}", "{f.func.name.overload_name}"}},'
# Generates MetaFunctions.h
def compute_meta_function_declaration(g: NativeFunctionsGroup) -> Optional[str]:
if not g.structured:
return None
with native_function_manager(g.out):
name = meta.name(g)
args = structured.meta_arguments(g)
args_str = ", ".join(a.decl() for a in args)
parent_class = g.out.structured_inherits
if parent_class is None:
parent_class = "at::impl::MetaBase"
meta_return = "void"
precomputed = g.out.precomputed if g.structured else None
if precomputed:
# Generate the template declaration with one bool parameter for each
# precomputed element. Each parameter is true if the corresponding (in
# terms of position) precomputed element has been set.
precomputed_values = [*precomputed.replace.values(), precomputed.add]
precomputed_elements = [
elem for replace_list in precomputed_values for elem in replace_list
]
precomputed_template_parameters = [
elem.name.upper() for elem in precomputed_elements
]
precomputed_template_params_str = ", ".join(
f"bool {param} = false" for param in precomputed_template_parameters
)
precompute_template_decl = f"template <{precomputed_template_params_str}>"
# Generate a string containing declarations of all precomputed elements.
precomputed_elements_with_cpp_types = [
structured.argument_type(elem, binds=elem.name)
for elem in precomputed_elements
]
precomputed_elements_decl = ";\n".join(
f"{elem.cpp_type(strip_ref=True)} {elem.name}"
for elem in precomputed_elements_with_cpp_types
)
# Generate "setter" methods for each precomputed element. Each method will return
# a new instance of precompute_out with the template parameter that corresponds to
# the member set by the method to true (to indicate that it has been set).
setter_methods = []
for i, elem in enumerate(precomputed_elements):
# Generate the signature. The return type will be the same
# as the type of `this` but with the template parameter
# corresponding to the element set by this method set to true.
# The assert generated below will ensure that this template
# parameter is false on the type of `this`.
return_ty_templates = ", ".join(
precomputed_template_parameters[:i]
+ ["true"]
+ precomputed_template_parameters[i + 1 :]
)
return_ty = f"precompute_out<{return_ty_templates}>"
elem_cpp_ty = precomputed_elements_with_cpp_types[i].cpp_type(
strip_ref=True
)
signature = f"{return_ty} set_{elem.name}({elem_cpp_ty} value)"
# Generate an assert which checks that the
# template parameter corresponding to the precomputed
# element that is set by this method is false on the
# class corresponding to the object that `this` points to.
# This ensures that each element can be set only once.
assert_msg = f'"{precomputed_elements[i].name} already set"'
assert_stmt = f"static_assert({precomputed_template_parameters[i]} == false, {assert_msg});"
# Generate the new object construction block. All state
# except the element that this method sets is copied from the
# object that `this` points to. The value for the element that
# the method sets is taken from a method parameter.
construction_stmts = []
construction_stmts.append(f"{return_ty} ret;")
for j, elem in enumerate(precomputed_elements):
if i == j:
construction_stmts.append(f"ret.{elem.name} = value;")
else:
construction_stmts.append(
f"ret.{elem.name} = this->{elem.name};"
)
construction_stmts.append("return ret;")
construction_block = "\n".join(construction_stmts)
setter_methods.append(
f"""
{signature} {{
{assert_stmt}
{construction_block}
}}
"""
)
setter_methods_decl = "\n".join(setter_methods)
# Meta should return an instance of the struct containing the precomputed elements.
meta_return_template_params = ", ".join(
["true"] * len(precomputed_template_parameters)
)
# This typedef (actually a using statement) is needed so that TORCH_META_FUNC can reuse the return
# type (which has a variable number of template parameters).
meta_return_typedef = f"using meta_return_ty = precompute_out <{meta_return_template_params}>;"
meta_return = "meta_return_ty"
precomputed_decl = f"""
{precompute_template_decl}
struct TORCH_API precompute_out {{
{setter_methods_decl}
{precomputed_elements_decl};
}};"""
else:
meta_return_typedef = ""
precomputed_decl = ""
return f"""\
struct TORCH_API structured_{name} : public {parent_class} {{
{precomputed_decl}
{meta_return_typedef}
{meta_return} meta({args_str});
}};
"""
def needs_backend_select(f: NativeFunction, selector: SelectiveBuilder) -> bool:
name = str(f.func.name.name)
if name.endswith("_like") or name.startswith("new_"):
return False
if f.func.arguments.tensor_options is None:
return False
return selector.is_native_function_selected(f)
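# Illustrative examples for needs_backend_select (the op names are assumed to
# exist in native_functions.yaml and are not checked here): a factory op such
# as `empty.memory_format` has tensor_options and so needs a backend-select
# kernel (when selected), while `empty_like` (name ends in "_like") and
# `add.Tensor` (no tensor_options) do not.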
# Generates RegisterBackendSelect.cpp, a series of kernels which provide
# specialized computation of dispatch key for operator signatures which cannot
# be easily done automatically using templating.
@dataclass(frozen=True)
class ComputeBackendSelect:
target: Union[Literal[Target.DEFINITION], Literal[Target.REGISTRATION]]
# Selector object to determine which operators to generate
# registration code for.
selector: SelectiveBuilder
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
if not needs_backend_select(f, self.selector):
return None
name = native.name(f.func)
native_sig = NativeSignature(f.func)
native_tensor_args = [
a
for a in native_sig.arguments()
if isinstance(a.argument, Argument) and a.argument.type.is_tensor_like()
]
dispatcher_sig = DispatcherSignature.from_schema(f.func)
sig: Union[NativeSignature, DispatcherSignature]
sig = dispatcher_sig
dispatcher_exprs = dispatcher_sig.exprs()
dispatch_key = "c10::computeDispatchKey(dtype, layout, device)"
if self.target is Target.DEFINITION:
            # I don't think there's actually a good reason to generate
            # these two cases differently.
            # The first case could probably be improved, though: it calls computeDispatchKeySet(),
            # which looks at TLS dispatch keys; there should not be any by the time we reach backend select.
if native_tensor_args:
assert f.func.arguments.has_tensor_arg()
tensor_args = ", ".join(a.name for a in native_tensor_args)
compute_dk = f"""\
DispatchKeySet _dk_set = c10::DispatchKeySet({dispatch_key}) | c10::detail::multi_dispatch_key_set({tensor_args});
DispatchKeySet _dk_mask = c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, DispatchKey::BackendSelect);
DispatchKeySet _dk = c10::impl::computeDispatchKeySet(_dk_set, _dk_mask);"""
else:
assert not f.func.arguments.has_tensor_arg()
compute_dk = (
f"DispatchKeySet _dk = c10::DispatchKeySet({dispatch_key});"
)
return f"""\
// aten::{f.func}
C10_ALWAYS_INLINE
{sig.defn(name)} {{
{compute_dk}
return at::_ops::{f.func.name.unambiguous_name()}::redispatch(
_dk, {', '.join(a.expr for a in dispatcher_exprs)});
}}
"""
elif self.target is Target.REGISTRATION:
return f"""m.impl("aten::{f.func.name}", TORCH_FN({name}));"""
else:
assert_never(self.target)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# YAML CODE GENERATION
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def format_yaml(data: object) -> str:
# Ignore alias in Dumper
YamlDumper.ignore_aliases = lambda self, data: True # type: ignore[assignment]
# Support serializing OrderedDict
def dict_representer(dumper: Any, data: Any) -> Any:
return dumper.represent_dict(data.items())
YamlDumper.add_representer(OrderedDict, dict_representer) # type: ignore[no-untyped-call]
# Some yaml parsers (e.g. Haskell's) don't understand line breaks.
# width=1e9 turns off optional line breaks and improves
# the portability of the outputted yaml.
return yaml.dump(data, default_flow_style=False, Dumper=YamlDumper, width=1e9) # type: ignore[no-any-return, call-overload]
# For some reason, some defaults we write to YAML are written as native
# YAML objects, rather than doing them uniformly as strings. This
# function detects those cases and converts them into native Python
# objects.
def pythonify_default(s: str) -> object:
if s == "true":
return True
elif s == "false":
return False
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return s
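# For example (illustrative):
#   pythonify_default("true")   -> True
#   pythonify_default("1")      -> 1
#   pythonify_default("0.5")    -> 0.5
#   pythonify_default("[0, 1]") -> "[0, 1]"   (left as a string)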
# What is a dynamic type? Over time, the semantic meaning of
# dynamic type has degraded to meaninglessness (in the old days,
# it captured dtype-ness of types, but that has gone away with
# the removal of TH). These days, it's mostly the same thing as
# the C++ API argument type, except that Tensor and Tensor?
# arguments simply present as Tensor.
#
# TODO: Get rid of dynamic_type, after getting tools/autograd
# to use the new codegen framework
def dynamic_type(t: Type) -> str:
if isinstance(t, OptionalType):
return dynamic_type(t.elem)
# Note we don't use t.is_tensor_like() here because it would
# also include Tensor[]
if str(t) == "Tensor":
return "at::Tensor"
return cpp.argumenttype_type(t, mutable=False, binds="__placeholder__").cpp_type()
def compute_method_of_yaml(variants: Set[Variant]) -> List[str]:
# This is written out explicitly to ensure that Tensor and
# namespace are put into the list in the right order
method_of = ["Type"]
if Variant.method in variants:
method_of.append("Tensor")
if Variant.function in variants:
method_of.append("namespace")
return method_of
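# For example (illustrative): an op with variants {function, method} yields
# ["Type", "Tensor", "namespace"], while a function-only op yields
# ["Type", "namespace"].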
def compute_returns_yaml(
f: NativeFunction,
) -> Tuple[List[Dict[str, str]], Dict[str, str]]:
# Note [name and field_name]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# To understand name_to_field_name, we must first talk about this
# schema:
#
# lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
#
# There is something very odd about this schema: it is an out
# variant of the function (that is to say, it will convert into
# at::lstsq_out() in the C++ API), but the names of the output
# return arguments don't match the keyword argument names of
# the inputs. It TURNS OUT that in this situation, the historical
# Declarations.yaml we want to output is this (abbreviated to
# only show relevant fields):
#
# arguments:
# ...
# - field_name: solution
# name: X
# - field_name: QR
# name: qr
# ...
#
# returns:
# - field_name: solution
# name: X
# - field_name: QR
# name: qr
#
# The name of the return fields is stored in 'field_name', and the
# name of the arguments is stored in 'name'. So when we process
# arguments, we need a way to get at the corresponding return. At
# the moment, this is most conveniently done by constructing a
# mapping from name (the argument concept) to field_name (the
# return concept) while processing return arguments, since we don't
# directly maintain this correspondence in the modeling of function
# schema itself.
#
# See also https://github.com/pytorch/pytorch/issues/43114
name_to_field_name: Dict[str, str] = {}
# Compute the returns field of the YAML entry
names = cpp.return_names(f)
returns = []
for i, (r, name) in enumerate(zip(f.func.returns, names)):
ret = {
"dynamic_type": dynamic_type(r.type),
"name": name,
"type": cpp.return_type(r).cpp_type(),
}
if r.name:
# See Note [name and field_name]
ret["field_name"] = r.name
if f.func.is_out_fn():
name_to_field_name[f.func.arguments.out[i].name] = r.name
returns.append(ret)
return returns, name_to_field_name
# arguments in yaml roughly corresponds to the public C++ API
def compute_cpp_argument_yaml(
cpp_a: Binding,
*,
schema_order: bool,
kwarg_only_set: Set[str],
out_arg_set: Set[str],
name_to_field_name: Dict[str, str],
) -> object:
if isinstance(cpp_a.argument, TensorOptionsArguments):
arg: Dict[str, object] = {
"annotation": None,
"dynamic_type": "at::TensorOptions",
"is_nullable": False,
"name": cpp_a.name,
"type": cpp_a.type,
"kwarg_only": True,
}
if cpp_a.default is not None:
arg["default"] = cpp_a.default
return arg
elif isinstance(cpp_a.argument, SelfArgument):
raise AssertionError()
elif isinstance(cpp_a.argument, Argument):
return compute_argument_yaml(
cpp_a.argument,
schema_order=schema_order,
kwarg_only_set=kwarg_only_set,
out_arg_set=out_arg_set,
name_to_field_name=name_to_field_name,
)
def compute_argument_yaml(
a: Argument,
*,
schema_order: bool,
kwarg_only_set: Set[str],
out_arg_set: Set[str],
name_to_field_name: Dict[str, str],
) -> object:
arg: Dict[str, object] = {
"annotation": str(a.annotation) if a.annotation else None,
"dynamic_type": dynamic_type(a.type),
"is_nullable": a.type.is_nullable(),
"name": a.name,
"type": cpp.argument_type(a, binds="__placeholder__").cpp_type(),
}
if a.default is not None:
arg["default"] = pythonify_default(cpp.default_expr(a.default, a.type))
if a.name in kwarg_only_set:
arg["kwarg_only"] = True
if a.name in out_arg_set:
arg["output"] = True
arg["allocate"] = True
# See Note [name and field_name]
if a.name in name_to_field_name:
arg["field_name"] = name_to_field_name[a.name]
# Historically, booleans don't get their size recorded, because it
# is already built into the cpp type (e.g., std::array<bool, 4>)
l = a.type.is_list_like()
if l is not None and l.size is not None and str(l.elem) != "bool":
arg["size"] = l.size
return arg
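# A rough sketch of the dict produced by compute_argument_yaml for a plain
# `Tensor self` argument (illustrative; the exact "type" string comes from the
# C++ API translation in torchgen.api.cpp):
#
#   {"annotation": None, "dynamic_type": "at::Tensor", "is_nullable": False,
#    "name": "self", "type": "const at::Tensor &"}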
@with_native_function
def compute_declaration_yaml(f: NativeFunction) -> object:
returns, name_to_field_name = compute_returns_yaml(f)
# These sets are used to conveniently test if an argument is a
# kwarg-only or out argument
kwarg_only_set = set(a.name for a in f.func.arguments.flat_kwarg_only)
out_arg_set = set(a.name for a in f.func.arguments.out)
sig_group = CppSignatureGroup.from_native_function(
f, method=False, fallback_binding=False
)
cpp_args = sig_group.signature.arguments()
arguments = [
compute_cpp_argument_yaml(
cpp_a,
schema_order=False,
kwarg_only_set=kwarg_only_set,
out_arg_set=out_arg_set,
name_to_field_name=name_to_field_name,
)
for cpp_a in cpp_args
]
schema_order_jit_arguments = list(f.func.schema_order_arguments())
schema_order_arguments = [
compute_argument_yaml(
a,
schema_order=True,
kwarg_only_set=kwarg_only_set,
out_arg_set=out_arg_set,
name_to_field_name=name_to_field_name,
)
for a in schema_order_jit_arguments
]
cpp_schema_order_types = [
# NB: method here doesn't matter
r.type
for a in schema_order_jit_arguments
for r in cpp.argument(
a,
method=False,
cpp_no_default_args=set(),
faithful=False,
has_tensor_options=False,
)
]
cpp_returns = cpp.returns_type(f.func.returns).cpp_type()
schema_order_cpp_signature = f"{cpp_returns} ({', '.join(cpp_schema_order_types)})"
is_factory_method = (
any(isinstance(a.argument, TensorOptionsArguments) for a in cpp_args)
and Variant.method not in f.variants
)
return OrderedDict(
[
("name", cpp.name(f.func)),
("operator_name", str(f.func.name.name)),
("overload_name", str(f.func.name.overload_name)),
("manual_kernel_registration", f.manual_kernel_registration),
(
"category_override",
f.category_override if f.category_override is not None else "",
),
("schema_string", f"aten::{f.func}"),
("arguments", arguments),
("schema_order_cpp_signature", schema_order_cpp_signature),
("schema_order_arguments", schema_order_arguments),
("method_of", compute_method_of_yaml(f.variants)),
("mode", "native"),
("python_module", "" if f.python_module is None else f.python_module),
("returns", returns),
("inplace", f.func.name.name.inplace),
("is_factory_method", is_factory_method),
("abstract", f.is_abstract),
("device_guard", f.device_guard),
("with_gil", False),
("deprecated", False),
("has_math_kernel", f.has_composite_implicit_autograd_kernel),
]
)
# See Note [Auto generated composite kernels]
def has_autogenerated_composite_kernel(f: NativeFunction) -> bool:
return (f.structured or f.structured_delegate is not None) and (
f.func.kind() == SchemaKind.functional or f.func.kind() == SchemaKind.inplace
)
@with_native_function_and_indices
def compute_registration_declarations(
f: NativeFunction, backend_indices: Dict[DispatchKey, BackendIndex]
) -> str:
name = dispatcher.name(f.func)
returns_type = dispatcher.returns_type(
f.func.returns
).cpp_type_registration_declarations()
args = dispatcher.arguments(f.func)
args_str = ", ".join(a.no_default().decl_registration_declarations() for a in args)
comment_data: Dict[str, str] = {
"schema": f"aten::{f.func}",
# TODO: What exactly is the semantics of the 'dispatch' field?
"dispatch": str(
{k for k, v in backend_indices.items() if v.has_kernel(f)}
!= {DispatchKey.CompositeImplicitAutograd}
),
"default": str(f.has_composite_kernel or has_autogenerated_composite_kernel(f)),
}
return f"""{returns_type} {name}({args_str}); // {json.dumps(comment_data)}
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# RUN IT ALL
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def get_custom_build_selector(
provided_op_registration_allowlist: Optional[List[str]],
op_selection_yaml_path: Optional[str],
) -> SelectiveBuilder:
assert not (
provided_op_registration_allowlist is not None
and op_selection_yaml_path is not None
), (
"Both provided_op_registration_allowlist and "
+ "op_selection_yaml_path can NOT be provided at the "
+ "same time."
)
op_registration_allowlist: Optional[Set[str]] = None
if provided_op_registration_allowlist is not None:
op_registration_allowlist = set(provided_op_registration_allowlist)
if op_registration_allowlist is not None:
selector = SelectiveBuilder.from_legacy_op_registration_allow_list(
op_registration_allowlist,
True,
False,
)
elif op_selection_yaml_path is not None:
selector = SelectiveBuilder.from_yaml_path(op_selection_yaml_path)
else:
selector = SelectiveBuilder.get_nop_selector()
return selector
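# Illustrative usage of get_custom_build_selector (the allowlist entries are
# assumed example operator names, not taken from a real build):
#
#   selector = get_custom_build_selector(["aten::add", "aten::mul"], None)
#   # -> SelectiveBuilder that only keeps the allowlisted operators
#
#   selector = get_custom_build_selector(None, None)
#   # -> nop selector that keeps everything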
def get_grouped_by_view_native_functions(
native_functions: Sequence[NativeFunction],
) -> Sequence[Union[NativeFunction, NativeFunctionsViewGroup]]:
def maybe_create_view_group(
d: Dict[Union[ViewSchemaKind, SchemaKind], NativeFunction]
) -> List[Union[NativeFunction, NativeFunctionsViewGroup]]:
funcs: List[Union[NativeFunction, NativeFunctionsViewGroup]] = []
if ViewSchemaKind.aliasing in d:
view = d.pop(ViewSchemaKind.aliasing)
view_inplace = d.pop(ViewSchemaKind.aliasing_inplace, None)
view_copy = d.pop(SchemaKind.functional, None)
funcs.append(
NativeFunctionsViewGroup(
view=view,
view_copy=view_copy,
view_inplace=view_inplace,
)
)
# Take the remaining functions that weren't part of the view group
# and emit them separately
for func in d.values():
funcs.append(func)
return funcs
grouped_by_views: Dict[
FunctionSchema, Dict[Union[SchemaKind, ViewSchemaKind], NativeFunction]
] = defaultdict(dict)
for f in native_functions:
schema = f.func.view_signature()
view_kind: ViewSchemaKind = f.view_schema_kind
# We need to group up ops relevant to the same "view", consisting of:
# view op (ViewSchemaKind.aliasing)
# view_inplace op (ViewSchemaKind.aliasing_inplace)
# view_copy op (SchemaKind.functional)
if view_kind == ViewSchemaKind.non_aliasing:
kind = f.func.kind()
assert kind not in grouped_by_views[schema]
grouped_by_views[schema][kind] = f
else:
assert view_kind not in grouped_by_views[schema]
grouped_by_views[schema][view_kind] = f
return list(concatMap(maybe_create_view_group, grouped_by_views.values()))
def get_grouped_native_functions(
native_functions: Sequence[NativeFunction],
) -> Sequence[Union[NativeFunction, NativeFunctionsGroup]]:
def flatten_pre_group(
d: Dict[SchemaKind, NativeFunction]
) -> Sequence[Union[NativeFunction, NativeFunctionsGroup]]:
r = NativeFunctionsGroup.from_dict(d)
if r is None:
# Invariant: any NativeFunctions that are code-generated
# should have been grouped into NativeFunctionsGroup objects
assert not any("generated" in f.tags for f in d.values())
return list(d.values())
else:
return [r]
# TODO: how come ValuesView isn't a Sequence lol
pre_grouped_native_functions = pre_group_native_functions(native_functions)
return list(
concatMap(flatten_pre_group, list(pre_grouped_native_functions.values()))
)
# Return native function declarations grouped by their namespaces.
def get_native_function_declarations(
*,
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
backend_indices: Dict[DispatchKey, BackendIndex],
) -> List[str]:
declarations: List[str] = []
ns_grouped_kernels: Dict[str, List[str]] = defaultdict(list)
newline = "\n"
for f in grouped_native_functions:
native_function_namespaces = set()
dispatch_keys = set()
for dispatch_key, backend_idx in backend_indices.items():
backend_metadata = backend_idx.get_kernel(f)
if backend_metadata:
namespace = backend_metadata.cpp_namespace
dispatch_keys.add(dispatch_key)
native_function_namespaces.add(namespace)
else:
namespace = DEFAULT_KERNEL_NAMESPACE
assert (
len(native_function_namespaces) <= 1
), f"Codegen only supports one namespace per operator, got {native_function_namespaces} from {dispatch_keys}"
ns_grouped_kernels[namespace].extend(
dest.compute_native_function_declaration(f, backend_idx)
)
for namespace, kernels in ns_grouped_kernels.items():
ns_helper = NamespaceHelper(
namespace_str=namespace,
entity_name="",
max_level=3,
)
        # Dedupe kernel names, preserving declaration order. Backends are
        # allowed to repeat kernel names; only generate the declaration once!
ordered_kernels = list(OrderedDict.fromkeys(kernels))
declarations.extend(
f"""
{ns_helper.prologue}
{newline.join(ordered_kernels)}
{ns_helper.epilogue}
""".split(
newline
)
)
return declarations
def get_kernel_namespace(
*, f: Union[NativeFunction, NativeFunctionsGroup], backend_idx: BackendIndex
) -> str:
backend_metadata = backend_idx.get_kernel(f)
assert not backend_metadata or "::native" in backend_metadata.cpp_namespace, (
f"The kernel for function {f.func.name if isinstance(f, NativeFunction) else f.functional.func.name} "
f"with dispatch key {backend_idx.dispatch_key}"
f" has a namespace {backend_metadata.cpp_namespace} and it's not ending with '::native'."
)
return (
backend_metadata.cpp_namespace if backend_metadata else DEFAULT_KERNEL_NAMESPACE
)
# Return native function definitions grouped by dispatch key and custom namespace.
# Used in RegisterDispatchKey.cpp, etc.
def get_native_function_definitions(
*,
fm: FileManager,
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
dispatch_key: DispatchKey,
backend_idx: BackendIndex,
selector: SelectiveBuilder,
rocm: bool,
skip_dispatcher_op_registration: bool,
gen_dispatch_helpers: bool,
) -> List[str]:
definitions: List[str] = []
ns_definitions: Dict[str, List[str]] = defaultdict(list)
anonymous_definitions: Dict[str, List[str]] = defaultdict(list)
registrations: Dict[str, Dict[str, List[str]]] = defaultdict(dict)
newline = "\n"
ns_gen = dest.RegisterDispatchKey(
backend_idx,
Target.NAMESPACED_DEFINITION,
selector,
rocm=rocm,
class_method_name=None,
skip_dispatcher_op_registration=skip_dispatcher_op_registration,
)
anonymous_gen = dest.RegisterDispatchKey(
backend_idx,
Target.ANONYMOUS_DEFINITION,
selector,
rocm=rocm,
class_method_name=None,
skip_dispatcher_op_registration=skip_dispatcher_op_registration,
)
reg_gen = dest.RegisterDispatchKey(
backend_idx,
Target.REGISTRATION,
selector,
rocm=rocm,
class_method_name=None,
skip_dispatcher_op_registration=skip_dispatcher_op_registration,
)
for f in grouped_native_functions:
kernel_namespace = get_kernel_namespace(f=f, backend_idx=backend_idx).replace(
"::native", ""
)
ns_definitions[kernel_namespace].extend(
ns_gen(f),
)
anonymous_definitions[kernel_namespace].extend(
anonymous_gen(f),
)
namespace = (
f.namespace if isinstance(f, NativeFunction) else f.functional.namespace
)
        # Initialize this namespace's registration list without clobbering
        # registrations already collected for other namespaces under the same
        # kernel namespace.
        if namespace not in registrations[kernel_namespace]:
            registrations[kernel_namespace][namespace] = []
registrations[kernel_namespace][namespace].extend(
reg_gen(f),
)
for kernel_namespace in ns_definitions:
if len(ns_definitions[kernel_namespace]) == 0:
continue
ns_helper = NamespaceHelper(namespace_str=kernel_namespace)
registration_body = ""
for namespace in registrations[kernel_namespace]:
if not registrations[kernel_namespace][namespace]:
continue
registration_body += f"""
TORCH_LIBRARY_IMPL({namespace}, {dispatch_key}, m) {{
{newline.join(registrations[kernel_namespace][namespace])}
}};"""
definitions.extend(
fm.substitute_with_template(
"RegisterDispatchDefinitions.ini",
lambda: {
"ns_prologue": ns_helper.prologue,
"ns_epilogue": ns_helper.epilogue,
"dispatch_helpers": dest.gen_registration_helpers(backend_idx)
if gen_dispatch_helpers
else [],
"dispatch_anonymous_definitions": anonymous_definitions[
kernel_namespace
],
"static_init_dispatch_registrations": ""
if skip_dispatcher_op_registration
else registration_body,
"deferred_dispatch_registrations": "",
"dispatch_namespace": dispatch_key.lower(),
"dispatch_namespaced_definitions": ns_definitions[kernel_namespace],
},
).split(newline)
)
return definitions
# Return native function declarations grouped by dispatch key and custom namespace.
# Used in CPUFunctions_inl.h, etc.
def get_namespaced_declaration(
*,
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
dispatch_key: DispatchKey,
backend_idx: BackendIndex,
selector: SelectiveBuilder,
rocm: bool,
) -> List[str]:
declarations: List[str] = []
ns_grouped_kernels: Dict[str, List[str]] = defaultdict(list)
newline = "\n"
func = dest.RegisterDispatchKey(
backend_idx,
Target.NAMESPACED_DECLARATION,
selector,
rocm=rocm,
class_method_name=None,
skip_dispatcher_op_registration=False,
)
for f in grouped_native_functions:
namespace = get_kernel_namespace(f=f, backend_idx=backend_idx).replace(
"native", dispatch_key.lower()
)
ns_grouped_kernels[namespace].extend(
func(f),
)
for namespace, kernels in ns_grouped_kernels.items():
if len(kernels) == 0:
continue
ns_helper = NamespaceHelper(
namespace_str=namespace, entity_name="", max_level=3
)
ordered_kernels = list(OrderedDict.fromkeys(kernels))
declarations.extend(
f"""
{ns_helper.prologue}
{newline.join(ordered_kernels)}
{ns_helper.epilogue}
""".split(
newline
)
)
return declarations
# Return native function schema registration code for aten and other namespaces.
def get_native_function_schema_registrations(
*,
native_functions: Sequence[NativeFunction],
schema_selector: SelectiveBuilder,
) -> Tuple[List[str], str]:
ns_native_functions: Dict[str, List[NativeFunction]] = defaultdict(list)
for native_function in native_functions:
ns_native_functions[native_function.namespace].append(native_function)
schema_registrations = ""
aten_schema_registrations = []
custom_namespace = None
for namespace, funcs in ns_native_functions.items():
schema_registrations_body = list(
mapMaybe(RegisterSchema(schema_selector), funcs)
)
# NB: we have to separate aten namespace registration from other namespaces,
# because in the template we hardcoded an operator for ATen already.
if namespace == "aten":
aten_schema_registrations = schema_registrations_body
else:
assert custom_namespace is None or namespace == custom_namespace, (
"Only one custom namespace (other than 'aten') is currently supported, "
f" but getting {namespace} and {custom_namespace}"
)
custom_namespace = namespace
tab = "\t"
schema_registrations += f"""
TORCH_LIBRARY({custom_namespace}, m) {{
{tab.join(schema_registrations_body)}
}};"""
return (aten_schema_registrations, schema_registrations)
def gen_aggregated_headers(
*,
native_functions: Sequence[NativeFunction],
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
structured_native_functions: Sequence[NativeFunctionsGroup],
static_dispatch_idx: List[BackendIndex],
selector: SelectiveBuilder,
backend_indices: Dict[DispatchKey, BackendIndex],
cpu_fm: FileManager,
cuda_fm: FileManager,
functions_keys: Set[DispatchKey],
dispatch_keys: Sequence[DispatchKey],
rocm: bool,
) -> None:
# Buck doesn't support dynamic output files, so we aggregate all operator
# headers into a single file
cpu_fm.write(
"NativeMetaFunctions.h",
lambda: {
"NativeMetaFunctions_includes": [],
"NativeMetaFunctions_declarations": list(
mapMaybe(compute_meta_function_declaration, structured_native_functions)
),
},
)
method_native_functions = [
fn for fn in native_functions if Variant.method in fn.variants
]
non_method_native_functions = [
fn for fn in native_functions if fn not in method_native_functions
]
cpu_fm.write(
"MethodOperators.h",
lambda: {
"MethodOperators_includes": [],
"MethodOperators_declarations": list(
mapMaybe(
ComputeOperators(
Target.DECLARATION,
static_dispatch_backend_indices=static_dispatch_idx,
),
method_native_functions,
)
),
},
)
cpu_fm.write(
"Operators.h",
lambda: {
"Operators_includes": ["#include <ATen/MethodOperators.h>"],
"Operators_declarations": list(
mapMaybe(
ComputeOperators(
Target.DECLARATION,
static_dispatch_backend_indices=static_dispatch_idx,
),
non_method_native_functions,
)
),
},
)
cpu_fm.write(
"Functions.h",
lambda: {
"static_dispatch_extra_headers": static_dispatch_extra_headers(
static_dispatch_idx
),
"Functions_includes": ["#include <ATen/Operators.h>"],
"Functions_declarations": list(
mapMaybe(
ComputeFunction(),
native_functions,
)
),
},
)
declarations = get_native_function_declarations(
grouped_native_functions=grouped_native_functions,
backend_indices=backend_indices,
)
cpu_fm.write(
"NativeFunctions.h",
lambda: {
"NativeFunctions_includes": ["#include <ATen/NativeMetaFunctions.h>"],
"NativeFunctions_declarations": declarations,
},
)
for dispatch_key in dispatch_keys:
fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
if dispatch_key in functions_keys:
inl_headers = f"#include <ATen/{dispatch_key}Functions_inl.h>"
fm.write_with_template(
f"{dispatch_key}Functions.h",
"DispatchKeyFunctions.h",
lambda: {
"dispatch_key": str(dispatch_key),
"inline_headers": inl_headers,
},
)
fm.write_with_template(
f"{dispatch_key}Functions_inl.h",
"DispatchKeyFunctions_inl.h",
lambda: {
"DispatchKeyFunctions_inl_includes": [],
"dispatch_namespace": dispatch_key.lower(),
"dispatch_namespaced_declarations": get_namespaced_declaration(
grouped_native_functions=grouped_native_functions,
dispatch_key=dispatch_key,
backend_idx=backend_indices[dispatch_key],
selector=selector,
rocm=rocm,
),
},
)
del fm
def gen_per_operator_headers(
*,
native_functions: Sequence[NativeFunction],
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
static_dispatch_idx: List[BackendIndex],
selector: SelectiveBuilder,
backend_indices: Dict[DispatchKey, BackendIndex],
cpu_fm: FileManager,
cuda_fm: FileManager,
ops_fm: FileManager,
functions_keys: Set[DispatchKey],
dispatch_keys: Sequence[DispatchKey],
rocm: bool,
) -> None:
# For CMake builds, split operator declarations into separate headers in
# the ATen/ops folder to split up header dependencies
functions_by_root_name: Dict[str, List[NativeFunction]] = defaultdict(lambda: [])
for fn in native_functions:
functions_by_root_name[fn.root_name].append(fn)
grouped_functions_by_root_name: Dict[
str, List[Union[NativeFunction, NativeFunctionsGroup]]
] = defaultdict(lambda: [])
for group in grouped_native_functions:
name = group.root_name
grouped_functions_by_root_name[name].append(group)
for name, functions in functions_by_root_name.items():
ops_fm.write_with_template(
f"{name}_ops.h",
"Operator.h",
lambda: {
"declarations": list(
mapMaybe(
ComputeOperators(
Target.DECLARATION,
static_dispatch_backend_indices=static_dispatch_idx,
),
functions,
)
),
},
)
ops_fm.write_with_template(
f"{name}.h",
"Function.h",
lambda: {
"static_dispatch_ops_headers": list(
mapMaybe(
lambda fn: static_dispatch_ops_header(
fn, backend_index=static_dispatch_idx
),
functions,
)
),
"operator_includes": f"#include <ATen/ops/{name}_ops.h>",
"function_definitions": list(
mapMaybe(
ComputeFunction(),
functions,
)
),
},
)
grouped_functions = grouped_functions_by_root_name.get(name, [])
structured_functions = [
fn
for fn in grouped_functions
if isinstance(fn, NativeFunctionsGroup) and fn.structured
]
is_structured = len(structured_functions) > 0
if is_structured:
ops_fm.write_with_template(
f"{name}_meta.h",
"NativeMetaFunction.h",
lambda: {
"meta_function_declarations": list(
mapMaybe(
compute_meta_function_declaration, structured_functions
)
),
},
)
declarations = get_native_function_declarations(
grouped_native_functions=grouped_functions, backend_indices=backend_indices
)
ops_fm.write_with_template(
f"{name}_native.h",
"NativeFunction.h",
lambda: {
"extra_includes": (
f"#include <ATen/ops/{name}_meta.h>" if is_structured else []
),
"native_function_declarations": declarations,
},
)
for category, suffix in [
("Functions", ""),
("Operators", "_ops"),
("NativeMetaFunctions", "_meta"),
("NativeFunctions", "_native"),
]:
cpu_fm.write(
f"{category}.h",
lambda: {
f"{category}_includes": [
f"#include <ATen/ops/{name}{suffix}.h>"
for name in sorted(functions_by_root_name.keys())
],
f"{category}_declarations": [],
},
)
for dispatch_key in dispatch_keys:
if dispatch_key not in functions_keys:
continue
dispatch_namespace = dispatch_key.lower()
dispatch_names = []
for name, functions in functions_by_root_name.items():
grouped_functions = grouped_functions_by_root_name.get(name, [])
declarations = list(
concatMap(
dest.RegisterDispatchKey(
backend_indices[dispatch_key],
Target.NAMESPACED_DECLARATION,
selector,
rocm=rocm,
class_method_name=None,
skip_dispatcher_op_registration=False,
),
grouped_functions,
)
)
if len(declarations) == 0:
continue
dispatch_names.append(name)
ops_fm.write_with_template(
f"{name}_{dispatch_namespace}_dispatch.h",
"DispatchKeyFunction.h",
lambda: {
"dispatch_namespace": dispatch_namespace,
"dispatch_namespaced_declarations": declarations,
},
)
fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
inl_headers = f"#include <ATen/{dispatch_key}Functions_inl.h>"
fm.write_with_template(
f"{dispatch_key}Functions.h",
"DispatchKeyFunctions.h",
lambda: {
"dispatch_key": str(dispatch_key),
"inline_headers": inl_headers,
},
)
fm.write_with_template(
f"{dispatch_key}Functions_inl.h",
"DispatchKeyFunctions_inl.h",
lambda: {
"dispatch_namespace": dispatch_namespace,
"DispatchKeyFunctions_inl_includes": [
f"#include <ATen/ops/{name}_{dispatch_namespace}_dispatch.h>"
for name in sorted(dispatch_names)
],
"dispatch_namespaced_declarations": [],
},
)
del fm
cpu_fm.write(
"MethodOperators.h",
lambda: {
"MethodOperators_includes": sorted(
f"#include <ATen/ops/{name}_ops.h>"
for name, functions in functions_by_root_name.items()
if any(Variant.method in fn.variants for fn in functions)
),
"MethodOperators_declarations": [],
},
)
def gen_headers(
*,
native_functions: Sequence[NativeFunction],
valid_tags: Set[str],
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
structured_native_functions: Sequence[NativeFunctionsGroup],
static_dispatch_idx: List[BackendIndex],
selector: SelectiveBuilder,
backend_indices: Dict[DispatchKey, BackendIndex],
core_fm: FileManager,
cpu_fm: FileManager,
cuda_fm: FileManager,
ops_fm: FileManager,
dispatch_keys: Sequence[DispatchKey],
functions_keys: Set[DispatchKey],
rocm: bool,
per_operator_headers: bool,
) -> None:
if per_operator_headers:
gen_per_operator_headers(
native_functions=native_functions,
grouped_native_functions=grouped_native_functions,
static_dispatch_idx=static_dispatch_idx,
selector=selector,
backend_indices=backend_indices,
cpu_fm=cpu_fm,
cuda_fm=cuda_fm,
ops_fm=ops_fm,
dispatch_keys=dispatch_keys,
functions_keys=functions_keys,
rocm=rocm,
)
else:
gen_aggregated_headers(
native_functions=native_functions,
grouped_native_functions=grouped_native_functions,
structured_native_functions=structured_native_functions,
static_dispatch_idx=static_dispatch_idx,
selector=selector,
backend_indices=backend_indices,
cpu_fm=cpu_fm,
cuda_fm=cuda_fm,
dispatch_keys=dispatch_keys,
functions_keys=functions_keys,
rocm=rocm,
)
core_fm.write(
"TensorBody.h",
lambda: {
"tensor_method_declarations": list(
mapMaybe(
ComputeTensorMethod(
target=Target.DECLARATION,
static_dispatch_backend_indices=static_dispatch_idx,
),
native_functions,
)
),
"tensor_method_definitions": list(
mapMaybe(
ComputeTensorMethod(
target=Target.DEFINITION,
static_dispatch_backend_indices=static_dispatch_idx,
),
native_functions,
)
),
},
)
cpu_fm.write(
"RedispatchFunctions.h",
lambda: {
"function_redispatch_definitions": list(
mapMaybe(ComputeRedispatchFunction(), native_functions)
),
},
)
cpu_fm.write(
"RegistrationDeclarations.h",
lambda: {
"registration_declarations": [
compute_registration_declarations(f, backend_indices)
for f in native_functions
],
},
)
cpu_fm.write(
"VmapGeneratedPlumbing.h", lambda: gen_all_vmap_plumbing(native_functions)
)
def gen_aten_interned_strings() -> Dict[str, str]:
attrs = set() # All function argument names
names = set() # All ATen function names
for func in native_functions:
names.add(str(func.func.name.name))
# Some operators don't have a functional variant but we still create a
# symbol without the underscore
names.add(func.func.name.name.base)
for arg in func.func.schema_order_arguments():
attrs.add(arg.name)
# These are keywords in C++, so aren't valid symbol names
# https://en.cppreference.com/w/cpp/language/operator_alternative
names -= set(
[
"and",
"and_eq",
"bitand",
"bitor",
"compl",
"not",
"not_eq",
"or",
"or_eq",
"xor",
"xor_eq",
]
)
return {
"aten_symbols": " \\\n".join(
[f"_(aten, {name})" for name in sorted(names)]
),
"attr_symbols": " \\\n".join(
[f"_(attr, {name})" for name in sorted(attrs)]
),
}
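# For reference, the generated aten_interned_strings.h consumes these values as
# X-macro style lines; a hypothetical excerpt of "aten_symbols" looks like:
#
#   _(aten, abs) \
#   _(aten, add) \
#   _(aten, addmm) \
#
# (every entry except the last carries the trailing line-continuation backslash).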
core_fm.write("aten_interned_strings.h", gen_aten_interned_strings)
def gen_tags_enum() -> Dict[str, str]:
return {"enum_of_valid_tags": (",\n".join(sorted(valid_tags)))}
core_fm.write("enum_tag.h", gen_tags_enum)
def gen_source_files(
*,
native_functions: Sequence[NativeFunction],
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
structured_native_functions: Sequence[NativeFunctionsGroup],
view_groups: Sequence[NativeFunctionsViewGroup],
selector: SelectiveBuilder,
static_dispatch_idx: List[BackendIndex],
backend_indices: Dict[DispatchKey, BackendIndex],
core_fm: FileManager,
cpu_fm: FileManager,
cpu_vec_fm: FileManager,
cuda_fm: FileManager,
dispatch_keys: Sequence[DispatchKey],
functions_keys: Set[DispatchKey],
rocm: bool,
force_schema_registration: bool,
per_operator_headers: bool,
skip_dispatcher_op_registration: bool,
) -> None:
extra_cuda_headers = """\
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>"""
if rocm:
extra_cuda_headers = """\
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/ATenHIPGeneral.h>
#include <ATen/hip/HIPDevice.h>
#include <ATen/hip/HIPContext.h>"""
for dispatch_key in dispatch_keys:
fm = cuda_fm if is_cuda_dispatch_key(dispatch_key) else cpu_fm
if per_operator_headers:
def operator_headers() -> List[str]:
headers = []
for g in grouped_native_functions:
is_registered = False
if backend_index.has_kernel(g):
is_registered = True
# The above has_kernel test on a group will only test for
# the existence of out dispatch, because that's how
# structured kernels work. But sometimes functions can be
# grouped but not be structured, and then you need to check
# each individual piece, as they may have manual dispatch
# entries.
elif isinstance(g, NativeFunctionsGroup) and any(
backend_index.has_kernel(fn) for fn in g.functions()
):
is_registered = True
# TODO: this condition is a bit questionable
# (It has to do with the fact that structured kernels get kernels
# generated for the Meta + CompositeExplicitAutogradNonFunctional keys).
elif g.structured and dispatch_key in (
DispatchKey.Meta,
DispatchKey.CompositeExplicitAutogradNonFunctional,
):
is_registered = True
if not is_registered:
continue
headers.append(f"#include <ATen/ops/{g.root_name}_native.h>")
if (
dispatch_key
== DispatchKey.CompositeExplicitAutogradNonFunctional
):
headers.append(f"#include <ATen/ops/{g.root_name}.h>")
if dispatch_key in functions_keys:
headers.append(
f"#include <ATen/ops/{g.root_name}_{dispatch_namespace}_dispatch.h>"
)
return sorted(set(headers))
else:
def operator_headers() -> List[str]:
headers = ["#include <ATen/NativeFunctions.h>"]
if dispatch_key == DispatchKey.CompositeExplicitAutogradNonFunctional:
headers.append("#include <ATen/Functions.h>")
if dispatch_key in functions_keys:
headers.append(f"#include <ATen/{dispatch_key!s}Functions.h>")
return headers
backend_index = backend_indices[dispatch_key]
ns_grouped_native_functions = defaultdict(list)
for grouped_native_function in grouped_native_functions:
namespace = (
grouped_native_function.namespace
if isinstance(grouped_native_function, NativeFunction)
else grouped_native_function.functional.namespace
)
ns_grouped_native_functions[namespace].append(grouped_native_function)
dispatch_namespace = str(dispatch_key).lower()
dispatch_definitions = get_native_function_definitions(
fm=fm,
grouped_native_functions=grouped_native_functions,
dispatch_key=dispatch_key,
backend_idx=backend_index,
selector=selector,
rocm=rocm,
skip_dispatcher_op_registration=skip_dispatcher_op_registration,
gen_dispatch_helpers=True,
)
fm.write_with_template(
f"Register{dispatch_key}.cpp",
"RegisterDispatchKey.cpp",
lambda: {
"extra_cuda_headers": extra_cuda_headers
if is_cuda_dispatch_key(dispatch_key)
else "",
"external_backend_headers": "",
"dispatch_headers": dest.gen_registration_headers(
backend_index, per_operator_headers, rocm
),
"ops_headers": operator_headers(),
"dispatch_helpers": "",
"dispatch_definitions": dispatch_definitions,
},
)
for g in structured_native_functions:
if not g.out.ufunc_inner_loop or not is_ufunc_dispatch_key(dispatch_key):
continue
name = g.functional.func.name.name
if dispatch_key is DispatchKey.CPU:
assert fm is cpu_fm
fm.write_with_template(
f"UfuncCPU_{name}.cpp",
"UfuncCPU.cpp",
lambda: {
"meta_declaration": compute_meta_function_declaration(g),
"native_declaration": dest.compute_native_function_declaration(
g, backend_indices[dispatch_key]
),
"native_definitions": dest.compute_ufunc_cpu(g),
},
)
cpu_vec_fm.write_with_template(
f"UfuncCPUKernel_{name}.cpp",
"UfuncCPUKernel.cpp",
lambda: {
"name": name,
"native_definitions": dest.compute_ufunc_cpu_kernel(g),
},
)
elif dispatch_key is DispatchKey.CUDA:
cuda_headers = "#include <ATen/native/cuda/Loops.cuh>"
if rocm:
cuda_headers = "#include <ATen/native/hip/Loops.cuh>"
fm.write_with_template(
f"UfuncCUDA_{name}.cu",
"UfuncCUDA.cu",
lambda: {
"name": name,
"cuda_headers": cuda_headers,
"meta_declaration": compute_meta_function_declaration(g),
"native_declaration": dest.compute_native_function_declaration(
g, backend_indices[dispatch_key]
),
"native_definitions": dest.compute_ufunc_cuda(g),
},
)
else:
raise AssertionError(f"unrecognized {dispatch_key} for ufunc")
del fm
# BackendSelect is generated specially
def gen_backend_select() -> Dict[str, List[str]]:
relevant_fns = [
fn for fn in native_functions if needs_backend_select(fn, selector)
]
return {
"ops_headers": [
f"#include <ATen/ops/{fn.root_name}_ops.h>" for fn in relevant_fns
],
"backend_select_method_definitions": list(
mapMaybe(
ComputeBackendSelect(Target.DEFINITION, selector), relevant_fns
)
),
"backend_select_function_registrations": list(
mapMaybe(
ComputeBackendSelect(Target.REGISTRATION, selector), relevant_fns
)
),
}
cpu_fm.write("RegisterBackendSelect.cpp", gen_backend_select)
schema_selector = selector
if force_schema_registration:
schema_selector = SelectiveBuilder.get_nop_selector()
(
aten_schema_registrations,
schema_registrations,
) = get_native_function_schema_registrations(
native_functions=native_functions, schema_selector=schema_selector
)
cpu_fm.write(
"RegisterSchema.cpp",
lambda: {
"aten_schema_registrations": []
if skip_dispatcher_op_registration
else aten_schema_registrations,
"schema_registrations": []
if skip_dispatcher_op_registration
else schema_registrations,
},
)
def key_func(
fn: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup]
) -> str:
return fn.root_name
cpu_fm.write_sharded(
"Operators.cpp",
native_functions,
key_fn=key_func,
env_callable=lambda fn: {
"operator_headers": [f"#include <ATen/ops/{fn.root_name}.h>"],
"definitions": [
ComputeOperators(
Target.DEFINITION,
static_dispatch_backend_indices=static_dispatch_idx,
)(fn)
],
},
base_env={
"static_dispatch_extra_headers": static_dispatch_extra_headers(
static_dispatch_idx
),
},
num_shards=5,
sharded_keys={
"operator_headers",
"definitions",
"static_dispatch_extra_headers",
},
)
cpu_fm.write("Functions.cpp", lambda: {})
core_fm.write("TensorMethods.cpp", lambda: {})
core_fm.write(
"ATenOpList.cpp",
lambda: {
"aten_ops": list(mapMaybe(compute_aten_op, native_functions)),
},
)
def functionalization_env_callable(
g: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup]
) -> Dict[str, List[str]]:
def gen_op_headers(
g: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup]
) -> List[str]:
if isinstance(g, NativeFunctionsViewGroup):
# view ops always get a functionalization kernel
headers = [
f"#include <ATen/ops/{g.view.root_name}_native.h>",
f"#include <ATen/ops/{g.view.root_name}_ops.h>",
]
if g.view_copy is not None:
headers += [
f"#include <ATen/ops/{g.view_copy.root_name}_native.h>",
f"#include <ATen/ops/{g.view_copy.root_name}_ops.h>",
]
return headers
elif isinstance(g, NativeFunctionsGroup):
headers = [
f"#include <ATen/ops/{g.functional.root_name}_native.h>",
f"#include <ATen/ops/{g.functional.root_name}_ops.h>",
f"#include <ATen/ops/{g.out.root_name}_native.h>",
f"#include <ATen/ops/{g.out.root_name}_ops.h>",
]
if g.inplace is not None:
headers += [
f"#include <ATen/ops/{g.inplace.root_name}_native.h>",
f"#include <ATen/ops/{g.inplace.root_name}_ops.h>",
]
if g.mutable is not None:
headers += [
f"#include <ATen/ops/{g.mutable.root_name}_native.h>",
f"#include <ATen/ops/{g.mutable.root_name}_ops.h>",
]
return headers
else:
return [
f"#include <ATen/ops/{g.root_name}_native.h>",
f"#include <ATen/ops/{g.root_name}_ops.h>",
]
return {
"ops_headers": gen_op_headers(g),
"func_definitions": gen_functionalization_definition(
selector,
g,
),
"func_registrations": gen_functionalization_registration(
selector,
g,
backend_indices[DispatchKey.CompositeImplicitAutograd],
),
}
all_groups: List[
Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup]
] = list(structured_native_functions) + list(
view_groups # type: ignore[assignment, arg-type, operator]
)
# Note: all operators that functionalization needs to handle (mutable and aliasing ops) should be grouped properly.
# The only reason we really need to deal with direct NativeFunctions here (instead of the groups) is because:
# (1) We can provide better error checking (error out if someone introduces a mutable op that doesn't obey the grouping logic)
# (2) functionalization needs to manually register CompositeImplicitAutograd kernels, which might not be grouped.
# Although this could go away long-term if we add a dedicated dispatch key for decompositions.
structured_map: Dict[OperatorName, NativeFunction] = {
f.func.name: f
for f in concatMap(lambda g: list(g.functions()), structured_native_functions)
}
view_map: Dict[OperatorName, NativeFunction] = {
f.func.name: f for f in concatMap(lambda g: list(g.functions()), view_groups)
}
for f in native_functions:
if f.func.name not in structured_map and f.func.name not in view_map:
all_groups.append(f)
cpu_fm.write_sharded(
"RegisterFunctionalization.cpp",
all_groups,
key_fn=key_func,
env_callable=functionalization_env_callable,
num_shards=4,
sharded_keys={
"ops_headers",
"func_definitions",
"func_registrations",
"func_add_back_views_definitions",
"func_add_back_views_registrations",
},
)
cpu_fm.write(
"FunctionalInverses.h",
lambda: {
"view_inverse_declarations": list(
mapMaybe(
lambda g: gen_functionalization_view_inverse_declaration(
selector, g
),
view_groups,
)
)
},
)
view_copy_with_symint_pairs: List[Tuple[NativeFunction, NativeFunction]] = []
for g1 in view_groups:
for g2 in view_groups:
if g1.view_copy is None or g2.view_copy is None:
continue
# TODO: make this more first class in the data model
g1_base_name = str(g1.view_copy.func.name.name)
g2_base_name = str(g2.view_copy.func.name.name)
same_base_op = (
g1_base_name == g2_base_name
and g1.view_copy.func.arguments.symints_to_ints()
== g2.view_copy.func.arguments.symints_to_ints()
)
op1_not_symint = "SymInt" not in str(g1.view_copy.func.name.overload_name)
op2_symint = "SymInt" in str(g2.view_copy.func.name.overload_name)
if same_base_op and op1_not_symint and op2_symint:
view_copy_with_symint_pairs.append(
(
g1.view_copy,
g2.view_copy,
)
)
# Note [view_copy NativeFunctions]
# Every view operator in native_functions.yaml that is not CompositeImplicitAutograd
# needs to have a corresponding non-aliasing {view}_copy variant.
# Backends that use functionalization and don't know how to handle aliasing ops
# are expected to implement kernels for these {view}_copy operators instead.
# The code for {view}_copy operators in core is pretty boilerplate-heavy however,
# so we codegen the following:
# (1) A CompositeExplicitAutogradNonFunctional kernel for every {view}_copy operator.
# These are never explicitly invoked by the functionalization pass,
# but they could theoretically be called from user code (I added these kernels for completeness,
# since the ops are part of the public API).
# (2) A derivative formula for every {view}_copy operator
# {view}_copy operators can re-use the same derivative formulas as their {view} op counterparts,
# so rather than stamping all of the entries out in derivatives.yaml,
# we codegen them in.
# This is similar to how autograd codegen doesn't require inplace ops to have a derivatives.yaml entry.
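# As an illustration only (paraphrasing the default template emitted by
# gen_composite_view_copy_kernel, with `foo` standing in for a real view op and
# `...` eliding its remaining arguments), the generated composite kernel is roughly:
#
#   at::Tensor foo_copy(const at::Tensor & self, ...) {
#     auto output = at::_ops::foo::call(self, ...);
#     return output.clone();
#   }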
cpu_fm.write(
"CompositeViewCopyKernels.cpp",
lambda: {
"ops_headers": [
"\n".join(
f"#include <ATen/ops/{f.root_name}_ops.h>"
for f in (
[g.view] if g.view_copy is None else [g.view, g.view_copy]
)
)
for g in view_groups
]
+ [
"\n".join(
f"#include <ATen/ops/{f.root_name}_ops.h>"
for f in [g.inplace, g.mutable, g.functional]
if f is not None and "generated" not in f.tags
)
for g in structured_native_functions
],
"CompositeViewCopyKernel_Definitions": list(
mapMaybe(gen_composite_view_copy_kernel, view_groups)
),
"SymIntViewCopyKernel_Definitions": list(
mapMaybe(
lambda pair: gen_symint_view_copy_kernel(pair[0], pair[1]),
view_copy_with_symint_pairs,
)
),
"GeneratedCompositeFunctional_Definitions": list(
mapMaybe(
gen_composite_functional_kernel,
structured_native_functions,
)
),
"GeneratedCompositeOut_Definitions": list(
mapMaybe(
gen_composite_out_kernel,
structured_native_functions,
)
),
},
)
def gen_declarations_yaml(
cpu_fm: FileManager, native_functions: Sequence[NativeFunction]
) -> None:
cpu_fm.write(
"Declarations.yaml",
lambda: format_yaml([compute_declaration_yaml(f) for f in native_functions]),
)
def get_torchgen_root() -> pathlib.Path:
"""
If you're depending on torchgen out-of-tree, you can use the root to figure
out the path to native_functions.yaml
"""
return pathlib.Path(__file__).parent.resolve()
def main() -> None:
parser = argparse.ArgumentParser(description="Generate ATen source files")
parser.add_argument(
"-s",
"--source-path",
help="path to source directory for ATen",
default="aten/src/ATen",
)
parser.add_argument(
"-o",
"--output-dependencies",
help="output a list of dependencies into the given file and exit",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="run without writing any files (still updates outputs)",
)
parser.add_argument(
"--per-operator-headers",
action="store_true",
help="generate separate headers per operator in ATen/ops",
)
parser.add_argument(
"-d", "--install_dir", help="output directory", default="build/aten/src/ATen"
)
parser.add_argument(
"--rocm",
action="store_true",
help="reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly",
)
parser.add_argument(
"--mps",
action="store_true",
help="Generate MPS registration code when set",
)
# TODO: --op_registration_whitelist will be removed when all call-sites
# for gen.py are moved over to using the operator YAML file for mobile
# custom build.
parser.add_argument(
"--op_registration_whitelist",
nargs="*",
help="filter op registrations by the whitelist (if set); "
"each item is `namespace`::`operator name` without overload name; "
"e.g.: aten::empty aten::conv2d ...",
)
parser.add_argument(
"--op_selection_yaml_path",
help="Provide a path to the operator selection (for custom build) YAML "
"that contains the information about the set of selected operators "
"and their categories (training, ...). Each operator is either a "
"full operator name with overload or just a bare operator name. "
"The operator names also contain the namespace prefix (e.g. aten::)",
)
parser.add_argument(
"--backend_whitelist",
nargs="*",
help="filter dispatch backend by the whitelist (if set), "
"e.g.: CPU CUDA QuantizedCPU ...",
)
parser.add_argument(
"--static_dispatch_backend",
nargs="*",
help="generate static dispatch code for the specific backend (if set)",
)
parser.add_argument(
"--skip_dispatcher_op_registration",
action="store_true",
help="Avoid registering operators into the dispatcher.",
)
parser.add_argument(
"--force_schema_registration",
action="store_true",
help="force it to generate schema-only registrations for all ops, including"
"those that are not listed on --op_registration_whitelist",
)
parser.add_argument(
"--generate",
type=str,
nargs="*",
choices=["headers", "sources", "declarations_yaml"],
default=["headers", "sources", "declarations_yaml"],
help="Generate only a subset of files",
)
options = parser.parse_args()
selector = get_custom_build_selector(
options.op_registration_whitelist,
options.op_selection_yaml_path,
)
native_yaml_path = os.path.join(options.source_path, "native/native_functions.yaml")
tags_yaml_path = os.path.join(options.source_path, "native/tags.yaml")
from torchgen.model import dispatch_keys
# TODO: stop generating CUDA kernels for non-CUDA builds
ignore_keys = set()
if not options.mps:
ignore_keys.add(DispatchKey.MPS)
if DispatchKey.MPS in dispatch_keys:
del dispatch_keys[dispatch_keys.index(DispatchKey.MPS)]
parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path, ignore_keys)
valid_tags = _GLOBAL_PARSE_TAGS_YAML_CACHE[tags_yaml_path]
native_functions, backend_indices = (
parsed_yaml.native_functions,
parsed_yaml.backend_indices,
)
grouped_native_functions = get_grouped_native_functions(native_functions)
structured_native_functions = [
g for g in grouped_native_functions if isinstance(g, NativeFunctionsGroup)
]
native_functions_with_view_groups = get_grouped_by_view_native_functions(
native_functions
)
view_groups = [
g
for g in native_functions_with_view_groups
if isinstance(g, NativeFunctionsViewGroup)
]
# NB: It is mandatory to NOT use os.path.join here, as the install directory
# will eventually be ingested by cmake, which does not respect Windows style
# path slashes. If you switch this to use os.path.join, you'll get an error
# like:
#
# Syntax error in cmake code when parsing string
#
# C:/Jenkins/workspace/pytorch-builds/pytorch-win-ws2016-cuda9-cudnn7-py3-build/build/aten/src/ATen\core/TensorMethods.h
#
# Invalid character escape '\c'.
core_install_dir = f"{options.install_dir}/core"
pathlib.Path(core_install_dir).mkdir(parents=True, exist_ok=True)
ops_install_dir = f"{options.install_dir}/ops"
pathlib.Path(ops_install_dir).mkdir(parents=True, exist_ok=True)
core_fm = make_file_manager(options=options, install_dir=core_install_dir)
cpu_fm = make_file_manager(options=options)
cpu_vec_fm = make_file_manager(options=options)
cuda_fm = make_file_manager(options=options)
ops_fm = make_file_manager(options=options, install_dir=ops_install_dir)
# Only a limited set of dispatch keys get CPUFunctions.h headers generated
# for them; this is the set
functions_keys = {
DispatchKey.CPU,
DispatchKey.CUDA,
DispatchKey.CompositeImplicitAutograd,
DispatchKey.CompositeExplicitAutograd,
DispatchKey.CompositeExplicitAutogradNonFunctional,
DispatchKey.Meta,
}
if options.mps:
functions_keys.add(DispatchKey.MPS)
if options.backend_whitelist:
dispatch_keys = [
k
for k in dispatch_keys
if is_generic_dispatch_key(k) or str(k) in options.backend_whitelist
]
static_dispatch_idx: List[BackendIndex] = []
if options.static_dispatch_backend:
static_dispatch_idx = [
backend_indices[DispatchKey.parse(key)]
for key in options.static_dispatch_backend
]
for key in options.static_dispatch_backend:
dp_key = DispatchKey.parse(key)
if dp_key not in functions_keys:
functions_keys.add(dp_key)
if "sources" in options.generate:
gen_source_files(
native_functions=native_functions,
grouped_native_functions=grouped_native_functions,
structured_native_functions=structured_native_functions,
view_groups=view_groups,
selector=selector,
static_dispatch_idx=static_dispatch_idx,
backend_indices=backend_indices,
core_fm=core_fm,
cpu_fm=cpu_fm,
cpu_vec_fm=cpu_vec_fm,
cuda_fm=cuda_fm,
dispatch_keys=dispatch_keys,
functions_keys=functions_keys,
rocm=options.rocm,
force_schema_registration=options.force_schema_registration,
per_operator_headers=options.per_operator_headers,
skip_dispatcher_op_registration=options.skip_dispatcher_op_registration,
)
if "headers" in options.generate:
gen_headers(
native_functions=native_functions,
valid_tags=valid_tags,
grouped_native_functions=grouped_native_functions,
structured_native_functions=structured_native_functions,
static_dispatch_idx=static_dispatch_idx,
selector=selector,
backend_indices=backend_indices,
core_fm=core_fm,
cpu_fm=cpu_fm,
cuda_fm=cuda_fm,
ops_fm=ops_fm,
dispatch_keys=dispatch_keys,
functions_keys=functions_keys,
rocm=options.rocm,
per_operator_headers=options.per_operator_headers,
)
if "declarations_yaml" in options.generate:
gen_declarations_yaml(native_functions=native_functions, cpu_fm=cpu_fm)
if options.output_dependencies:
depfile_path = pathlib.Path(options.output_dependencies).resolve()
depfile_name = depfile_path.name
depfile_stem = depfile_path.stem
for fm, prefix in [
(cpu_fm, ""),
(cpu_vec_fm, "cpu_vec_"),
(core_fm, "core_"),
(cuda_fm, "cuda_"),
(ops_fm, "ops_"),
]:
varname = prefix + depfile_stem
path = depfile_path.parent / (prefix + depfile_name)
fm.write_outputs(varname, str(path))
if __name__ == "__main__":
main()
| pytorch-master | torchgen/gen.py |
import threading
from contextlib import contextmanager
from typing import Iterator, Optional
# Simple dynamic scoping implementation. The name "parametrize" comes
# from Racket.
#
# WARNING WARNING: LOOKING TO EDIT THIS FILE? Think carefully about
# why you need to add a toggle to the global behavior of code
# generation. The parameters here should really only be used
# for "temporary" situations, where we need to temporarily change
# the codegen in some cases because we cannot conveniently update
# all call sites, and are slated to be eliminated once all call
# sites are eliminated. If you don't have a plan for how to get there,
# DON'T add a new entry here.
class Locals(threading.local):
use_const_ref_for_mutable_tensors: Optional[bool] = None
_locals = Locals()
def use_const_ref_for_mutable_tensors() -> bool:
assert _locals.use_const_ref_for_mutable_tensors is not None, (
"need to initialize local.use_const_ref_for_mutable_tensors with "
"local.parametrize"
)
return _locals.use_const_ref_for_mutable_tensors
@contextmanager
def parametrize(*, use_const_ref_for_mutable_tensors: bool) -> Iterator[None]:
old_use_const_ref_for_mutable_tensors = _locals.use_const_ref_for_mutable_tensors
try:
_locals.use_const_ref_for_mutable_tensors = use_const_ref_for_mutable_tensors
yield
finally:
_locals.use_const_ref_for_mutable_tensors = (
old_use_const_ref_for_mutable_tensors
)
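if __name__ == "__main__":
    # Minimal self-contained sketch (not part of the library surface): the flag is
    # readable anywhere beneath the with-block and reverts to its previous value
    # afterwards, even if the body raises.
    with parametrize(use_const_ref_for_mutable_tensors=True):
        assert use_const_ref_for_mutable_tensors()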
| pytorch-master | torchgen/local.py |
from typing import Callable, List, Optional, Tuple, Union
from torchgen.api import cpp, dispatcher
from torchgen.api.translate import translate
from torchgen.api.types import (
BaseCType,
Binding,
CType,
DispatcherSignature,
FunctionalizationLambda,
NativeSignature,
tensorListT,
tensorT,
VectorCType,
ViewInverseSignature,
)
from torchgen.context import (
native_function_manager,
with_native_function,
with_native_function_and,
)
from torchgen.model import (
Argument,
BackendIndex,
BaseTy,
BaseType,
FunctionSchema,
ListType,
NativeFunction,
NativeFunctionsGroup,
NativeFunctionsViewGroup,
Return,
SchemaKind,
SelfArgument,
TensorOptionsArguments,
)
from torchgen.native_function_generation import (
INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT,
OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
)
from torchgen.selective_build.selector import SelectiveBuilder
# Note: [Mutable Ops Not Using Functionalization]
# Ops in this list currently do not work with functionalization and should be fixed.
MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION = (
OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY
+ MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT
+ INPLACE_OPS_THAT_DONT_GET_GROUPED_PROPERLY
+ [
# It will be BC-breaking, but we should fix their schemas.
# should be inplace?
"record_stream",
# See Note [resize_ in Functionalization]
"resize_",
]
)
# This file contains codegen that relates to the functionalization pass.
# It includes:
# - gen_functionalization_definition
# Generates dispatcher kernel definitions for the functionalization pass.
# - gen_functionalization_registration
# Generates dispatcher kernel registrations for the functionalization pass.
# - gen_functionalization_view_inverse_declaration
# Generates a declaration for an "inverse view", for every view op
# that is needed in functionalization. We manually implement their definitions.
# - gen_composite_view_copy_kernel
# Generates view_copy() composite kernels for all view_copy operators.
# Generates the body of the default composite C++ kernel for a {view}_copy NativeFunction
# See Note [view_copy NativeFunctions]
@with_native_function
def gen_composite_view_copy_kernel(g: NativeFunctionsViewGroup) -> Optional[str]:
if g.view_copy is None:
return None
# For view_copy.SymInt overloads,
# See gen_symint_view_copy_kernel.
if g.view_copy.func.name.overload_name == "SymInt":
return None
# We can make view_copy work in more cases by using reshape()
# when a normal view call would ordinarily fail.
# This also makes LTC more efficient, because they don't need to include
# clone() calls in their graph (which is normally needed by reshape).
if str(g.view_copy.func.name) == "view_copy":
return """\
at::Tensor view_copy(const at::Tensor & self, at::IntArrayRef size) {
DimVector shape = infer_size_dv(size, self.numel());
if (!at::detail::computeStride(self.sizes(), self.strides(), shape).has_value()) {
return self.reshape(size);
} else {
auto output = at::_ops::view::call(self, size);
return output.clone();
}
}
"""
# view_copy is a native signature, since we're generating an at::native:: kernel
view_copy_sig = NativeSignature(g.view_copy.func)
# view is a dispatcher signature, since we're calling into the at::_ops API
view_sig = DispatcherSignature(g.view.func)
view_api_name = g.view.func.name.unambiguous_name()
exprs = ", ".join(
[e.expr for e in translate(view_copy_sig.arguments(), view_sig.arguments())]
)
# view ops today always return either a Tensor or a list of Tensors
assert len(g.view.func.returns) == 1
assert g.view.func.returns[0].type == BaseType(
BaseTy.Tensor
) or g.view.func.returns[0].type == ListType(BaseType(BaseTy.Tensor), None)
if g.view.func.returns[0].type == BaseType(BaseTy.Tensor):
return_cloned_output = """\
return output.clone();"""
else:
# If the return type is a list, we need to clone each tensor in the list.
return_cloned_output = f"""\
{view_copy_sig.returns_type().cpp_type()} out_clone;
for (const auto i : c10::irange(output.size())) {{
out_clone.push_back(output[i].clone());
}}
return out_clone;"""
# The default generated composite kernel for {view}_copy() operators just clones
# the input tensor, and runs the underlying view on the clone.
return f"""
{view_copy_sig.defn()} {{
auto output = at::_ops::{view_api_name}::call({exprs});
{return_cloned_output}
}}
"""
# For symint view copy kernels, we want to generate them to call into
# their concrete view_copy counterparts.
@with_native_function_and
def gen_symint_view_copy_kernel(
view_copy: NativeFunction, view_copy_symint: NativeFunction
) -> str:
# view_copy.symint is a native signature, since we're generating an at::native:: kernel
view_copy_symint_sig = NativeSignature(view_copy_symint.func)
# view_copy is a dispatcher signature, since we're calling into the at::_ops API
view_copy_sig = DispatcherSignature(view_copy.func)
exprs = ", ".join(
[
e.expr
for e in translate(
view_copy_symint_sig.arguments(), view_copy_sig.arguments()
)
]
)
return f"""
{view_copy_symint_sig.defn()} {{
return at::_ops::{view_copy.func.name.unambiguous_name()}::call({exprs});
}}
"""
def return_str(rets: Tuple[Return, ...], names: List[str]) -> str:
assert len(rets) == len(names)
if len(rets) == 0:
return ""
elif len(rets) == 1:
return f"return {names[0]};"
else:
return f"return {dispatcher.returns_type(rets).cpp_type()}({', '.join(names)});"
def modifies_arguments(f: NativeFunction) -> bool:
return any(
a.annotation is not None and a.annotation.is_write
for a in f.func.arguments.flat_all
)
def wrapper_name(func: FunctionSchema) -> str:
if func.name.overload_name:
return f"{cpp.name(func)}_{func.name.overload_name}"
else:
return cpp.name(func)
def is_tensor_like(a: Union[Argument, TensorOptionsArguments, SelfArgument]) -> bool:
return isinstance(a, SelfArgument) or (
isinstance(a, Argument) and a.type.is_tensor_like()
)
# We need to wrap / unwrap various arguments from the op in the functionalization kernels.
# Some op schemas include non-owning types though (like TensorList),
# and when we unwrap them we expect to get out an owning type!
# We also return a lambda that tells you how to convert the non-owning type argument into the owning type.
def get_owning_type(t: CType) -> Tuple[CType, Callable[[str], str]]:
if t == BaseCType(tensorListT):
return VectorCType(BaseCType(tensorT)), lambda x: f"{x}.vec()"
# There are technically other non-owning types out there (like IntArrayRef),
# but functionalization only actually cares about the ones involving tensors.
return t, lambda x: x
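# A small illustrative check of get_owning_type (comment-only sketch; the values just
# mirror the two branches above): a TensorList binding becomes an owning std::vector
# of Tensors plus a ".vec()" conversion, and everything else passes through untouched.
#
#   owning, convert = get_owning_type(BaseCType(tensorListT))
#   # owning == VectorCType(BaseCType(tensorT)); convert("weights") == "weights.vec()"
#   owning, convert = get_owning_type(BaseCType(tensorT))
#   # owning == BaseCType(tensorT); convert("self") == "self"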
# unwraps all tensor-like arguments, returning:
# (1) a string containing all of the logic that does the unwrapping
# (2) a context, to be used by translate(), with all of the relevant bindings.
def unwrap_tensor_args(
sig: DispatcherSignature, *, is_view_op: bool
) -> Tuple[str, List[Binding]]:
context: List[Binding] = []
unwrapped_tensor_args: List[str] = []
for arg in sig.arguments():
if is_tensor_like(arg.argument):
# for tensor inputs, we want to unwrap them before passing them into the redispatch calls.
unwrapped_name = f"{arg.name}_"
# For most ops, the functionalization needs to sync any pending updates on the input tensors
# before calling the operator, since otherwise the operator will act on stale data.
# For view ops though, we can continue to defer syncing until the tensor is used by
# a non-view operator.
maybe_sync_input = (
"" if is_view_op else f"at::functionalization::impl::sync({arg.name});"
)
unwrapped_type, conversion_fn = get_owning_type(
arg.nctype.remove_const_ref().type
)
unwrapped_tensor_args.append(
f"""
{unwrapped_type.cpp_type()} {unwrapped_name};
if (at::functionalization::impl::isFunctionalTensor({arg.name})) {{
{maybe_sync_input}
{unwrapped_name} = at::functionalization::impl::from_functional_tensor({arg.name});
}} else {{
{unwrapped_name} = {conversion_fn(arg.name)};
}}"""
)
context.append(arg.with_name(unwrapped_name))
else:
# for non-tensor inputs, we want to pass them directly into the redispatch calls.
context.append(arg)
unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
return unwrap_tensor_args_str, context
# converts all tensor-like arguments to meta tensors, which are used to compute stride info. Returns:
# (1) a string containing all of the logic that does the conversions.
# (2) a context, to be used by translate(), with all of the relevant bindings.
def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
context: List[Binding] = []
unwrapped_tensor_args: List[str] = []
for arg in sig.arguments():
if is_tensor_like(arg.argument):
# for tensor inputs, we want to unwrap them before passing them into the redispatch calls.
a_ = arg.name
unwrapped_name = f"{arg.name}_meta"
unwrapped_tensor_args.append(f"auto {unwrapped_name} = to_meta({a_});")
context.append(arg.with_name(unwrapped_name))
else:
# for non-tensor inputs, we want to pass them directly into the redispatch calls.
context.append(arg)
unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
return unwrap_tensor_args_str, context
# The functionalization codegen currently expects view op schemas to have this form:
# foo(Tensor(a), ...) -> Tensor(a) (e.g. transpose)
# foo(Tensor(a!), ...) -> Tensor(a!) (e.g. transpose_)
def assert_view_op_properties(func: FunctionSchema) -> None:
def is_alias(a: Argument) -> bool:
return a.annotation is not None
args = func.arguments.flat_non_out
# The first argument is a tensor with an alias semantics (annotations)
assert len(args) > 0 and args[0].type == BaseType(
BaseTy.Tensor
), f"""In the functionalization codegen, we expect the first argument of every view operator to be a tensor,
but found an argument of type {str(args[0].type)} for operator: {str(func.name)}."""
# No other arguments have aliasing semantics
assert is_alias(args[0]) and not any(
is_alias(a) for a in args[1:]
), """In the functionalization codegen, we expect the first argument of every view operator to alias the output.
View operators with multiple aliasing inputs aren't supported yet. Found an operator that doesn't satisfy this constraint"""
# Generates the Functionalization kernel for:
# - ops that create aliases (e.g. transpose())
# - ops that are views AND mutations (e.g. transpose_())
def emit_view_functionalization_body(
g: NativeFunctionsViewGroup, *, view_inplace: bool
) -> str:
if view_inplace:
# This op is both an inplace op AND a view op.
# See Note [Functionalization Pass - Inplace View Ops] for details.
# I currently have the view meta call into the out-of-place variant of the view, to avoid
# having to define an extra ~20 inplace {view}_inverse_ functions.
# Most view ops don't belong to a NativeFunctionsGroup at all, because we don't define out= variants for view ops.
# I'm assuming that every inplace-view op has a corresponding out-of-place view op,
# with the same name but the trailing underscore removed.
# This is currently asserted at parse time in gen.py (see error_check_native_functions).
assert g.view_inplace is not None
f = g.view_inplace
else:
f = g.view
assert g.view_copy is not None
with native_function_manager(f):
call_sig = DispatcherSignature.from_schema(g.view_copy.func)
# the "view_copy" op name that the functionalization kernels need to call
api_name = g.view_copy.func.name.unambiguous_name()
# Sometimes the functionalization pass needs to no-op (e.g. if it was passed non-functional tensors)
# "no-op"ing in this context is just redispatching to the original op.
noop_api_name = f.func.name.unambiguous_name()
dispatcher_sig = DispatcherSignature.from_schema(f.func)
assert_view_op_properties(f.func)
view_tensor_name = dispatcher_sig.arguments()[0].name
return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type()
unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(
dispatcher_sig, is_view_op=True
)
view_redispatch_args = [
e.expr
for e in translate(unwrapped_args_ctx, call_sig.arguments(), method=False)
]
forward_lambda = FunctionalizationLambda.from_func(g, is_reverse=False)
reverse_lambda = FunctionalizationLambda.from_func(g, is_reverse=True)
# The meta API call should use the same arguments, but convert all tensors to meta tensors first.
meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
meta_call_args = [
e.expr for e in translate(meta_call_ctx, call_sig.arguments(), method=False)
]
if "inplace_view" in f.tags:
# See Note [Functionalization Pass - Inplace View Ops] for more details
return f"""
{dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
// functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
{unwrap_tensor_args_str}
at::AutoDispatchSkipFunctionalize guard;
return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
}}
auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
{forward_lambda.decl()} {{
if (reapply_views) {{
return {forward_lambda.inner_call(reapply_views=True)}
}} else {{
return {forward_lambda.inner_call(reapply_views=False)}
}}
}},
{reverse_lambda.decl()} {{
return {reverse_lambda.inner_call()}
}}
);
{return_type} reference_tensor_output;
{{
at::AutoDispatchSkipFunctionalize func_guard;
c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
{meta_conversion_str}
reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
}}
// This function adds the above view meta to the current tensor and replays them off the base,
// mutating the size/stride info of the current FunctionalTensorWrapper.
// Because of this, we need to make sure to run the reference shape function above,
// BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
at::functionalization::impl::mutate_view_meta({view_tensor_name}, view_meta);
// See Note [Propagating strides in the functionalization pass]
// XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
// on a reference implementation here (instead of relying on the output from the forward lambda
// having the correct stride info)
at::functionalization::impl::set_sizes_strides_offset({view_tensor_name}, reference_tensor_output);
return {view_tensor_name};
}}
"""
else:
return f"""
{dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
{unwrap_tensor_args_str}
if (!at::functionalization::impl::isFunctionalTensor({view_tensor_name})) {{
// functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
at::AutoDispatchSkipFunctionalize guard;
return at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
}}
auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
{return_type} reference_tensor_output;
{{
at::AutoDispatchSkipFunctionalize func_guard;
c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
{meta_conversion_str}
reference_tensor_output = at::_ops::{noop_api_name}::call({', '.join(meta_call_args)});
}}
{return_type} tmp_output;
{{
at::AutoDispatchSkipFunctionalize guard;
if (reapply_views) {{
tmp_output = at::_ops::{noop_api_name}::call({', '.join(view_redispatch_args)});
}} else {{
tmp_output = at::_ops::{api_name}::call({', '.join(view_redispatch_args)});
}}
}}
at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
{forward_lambda.decl()} {{
if (reapply_views) {{
return {forward_lambda.inner_call(reapply_views=True)}
}} else {{
return {forward_lambda.inner_call(reapply_views=False)}
}}
}},
{reverse_lambda.decl()} {{
return {reverse_lambda.inner_call()}
}}
);
auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, {view_tensor_name}, view_meta);
// See Note [Propagating strides in the functionalization pass]
at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
return out;
}}
"""
def maybe_create_output(f: NativeFunction, var_name: str) -> str:
if len(f.func.returns) == 0:
return ""
return_type = dispatcher.returns_type(f.func.returns).remove_const_ref().cpp_type()
return f"{return_type} {var_name} = "
# Given a NativeFunction, and a variable name corresponding to the output of redispatching on the function,
# this returns two lists of names, consisting of:
# - the names of returns corresponding to the original (mutable) inputs of the outer function
# - the names of returns corresponding to the (immutable) outputs of the inner redispatched function
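# For example (hedged sketch): an inplace op such as `add_`, whose single return
# aliases `self`, yields (["self"], []); a functional op returning two fresh tensors
# yields ([], ["std::get<0>(tmp)", "std::get<1>(tmp)"]) when inner_return_var is "tmp".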
def get_mutable_redispatch_return_names(
f: NativeFunction, inner_return_var: str
) -> Tuple[List[str], List[str]]:
aliased_returns = []
non_aliased_returns = []
for (i, name) in enumerate(f.func.aliased_return_names()):
if name is not None:
aliased_returns.append(name)
else:
non_aliased_returns.append(
inner_return_var
if len(f.func.returns) == 1
else f"std::get<{i}>({inner_return_var})"
)
return aliased_returns, non_aliased_returns
# When functionalization "no-op's" and redispatches on a mutable operator, we need to take care so that:
# - For fresh outputs, we return the result of the redispatch (without wrapping outputs)
# - For outputs that were aliased to inputs, we return the inputs directly (since some of them might have been wrapped)
def return_from_mutable_noop_redispatch(
f: NativeFunction, inner_return_var: str
) -> str:
aliased, non_aliased = get_mutable_redispatch_return_names(f, inner_return_var)
# Just get all of the return names, and immediately return them
return return_str(f.func.returns, aliased + non_aliased)
def wrap_propagate_mutations_and_return(
f: NativeFunction, functional_op: NativeFunction, inner_return_var: str
) -> str:
mutable_arg_names = f.func.arguments.mutable_arg_names()
(
aliased_outer_rets,
non_aliased_outer_rets,
) = get_mutable_redispatch_return_names(f, inner_return_var)
_, non_aliased_inner_rets = get_mutable_redispatch_return_names(
functional_op, inner_return_var
)
# The outer function may have a mix of aliased and non-aliased outputs,
# But the inner functional op that we're transforming to should only have non-aliased outputs
assert len(mutable_arg_names) + len(non_aliased_outer_rets) == len(
non_aliased_inner_rets
)
# First, take all of the newly created outputs from the inner call and wrap them into functional tensors
updates = []
non_aliased_wrapped_ret_names = []
for (i, inner_ret) in enumerate(
non_aliased_inner_rets[: len(non_aliased_outer_rets)]
):
ret_name = f"output_{i}"
updates.append(
f"""\
auto output_{i} = at::functionalization::impl::to_functional_tensor({inner_ret});"""
)
non_aliased_wrapped_ret_names.append(ret_name)
# Next, take all of the mutated outputs from the inner call corresponding to mutated inputs,
# and propagate the mutations
for (outer_arg, inner_ret) in zip(
mutable_arg_names, non_aliased_inner_rets[len(non_aliased_outer_rets) :]
):
updates.append(
f"""\
at::functionalization::impl::replace_({outer_arg}, {inner_ret});
at::functionalization::impl::commit_update({outer_arg});"""
)
# Finally, we return:
# - Any mutable arguments that also returns
# - Any immutable returns that were created wrapping the output from the inner call
returns_str = return_str(
f.func.returns, aliased_outer_rets + non_aliased_wrapped_ret_names
)
updates_str = "\n".join(updates)
return f"""\
{updates_str}
{returns_str}"""
# Generates the Functionalization kernel for:
# - mutation ops (inplace and out= ops)
@with_native_function_and
def emit_inplace_functionalization_body(
f: NativeFunction, g: NativeFunctionsGroup
) -> str:
# mutation case
assert modifies_arguments(f)
dispatcher_sig = DispatcherSignature.from_schema(f.func)
unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(
dispatcher_sig, is_view_op=False
)
mutated_names = [
a.name
for a in f.func.arguments.flat_all
if a.type.is_tensor_like() and a.annotation is not None
]
non_mutated_names = [
a.name
for a in f.func.arguments.flat_all
if a.type.is_tensor_like() and a.annotation is None
]
# all mutable inputs must be functional tensors in order to participate in functionalization
check_all_mutated_args_are_functional = " && ".join(
["true"]
+ [
f"at::functionalization::impl::isFunctionalTensor({a})"
for a in mutated_names
]
)
check_any_non_mutated_args_are_functional = " || ".join(
["false"]
+ [
f"at::functionalization::impl::isFunctionalTensor({a})"
for a in non_mutated_names
]
)
# These are used in the cases where we don't functionalize and redispatch to the inplace op
# case 1: we hit an inplace op that doesn't have an out-of-place equivalent
# case 2: we hit an inplace op but our inputs are not functional tensors (in which case our kernel just no-ops)
inplace_exprs = [
e.expr
for e in translate(unwrapped_args_ctx, dispatcher_sig.arguments(), method=False)
]
# call the out-of-place variant of the op
return_type = (
dispatcher.returns_type(g.functional.func.returns).remove_const_ref().cpp_type()
)
functional_sig = DispatcherSignature.from_schema(g.functional.func)
functional_exprs = [
e.expr
for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)
]
if f.func.is_out_fn():
mutable_input_post_processing = "\n".join(
[
f"""
at::functionalization::impl::replace_(
{a.name}, {'std::get<' + str(i) + '>(tmp_output)' if len(f.func.returns) > 1 else 'tmp_output'});
at::functionalization::impl::commit_update({a.name});"""
for (i, a) in enumerate(f.func.arguments.out)
if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
]
)
else:
mutable_input_post_processing = "\n".join(
[
f"""
at::functionalization::impl::replace_({a.name}, tmp_output);
at::functionalization::impl::commit_update({a.name});"""
for a in f.func.arguments.flat_all
if a.annotation and a.annotation.is_write and a.type.is_tensor_like()
]
)
meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
return f"""
{dispatcher_sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
if ({str(f.func.kind() == SchemaKind.inplace).lower()}) {{
// Before converting the mutable op to its functional variant, run meta tensors through the original op.
// This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
at::AutoDispatchSkipFunctionalize func_guard;
c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
{meta_conversion_str}
at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(a.name for a in meta_call_ctx)});
}}
{unwrap_tensor_args_str}
if (!({check_all_mutated_args_are_functional})) {{
if (({check_any_non_mutated_args_are_functional})) {{
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
TORCH_INTERNAL_ASSERT(false,
"mutating a non-functional tensor with a functional tensor is not allowed.",
" Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
}} else {{
// case 2: arguments are not functional tensors, so we no-op and redispatch.
at::AutoDispatchSkipFunctionalize guard;
{maybe_create_output(f, 'tmp_output')}at::_ops::{f.func.name.unambiguous_name()}::call({', '.join(inplace_exprs)});
{return_from_mutable_noop_redispatch(f, 'tmp_output')};
}}
}} else {{
{return_type} tmp_output;
{{
at::AutoDispatchSkipFunctionalize guard;
tmp_output = at::_ops::{g.functional.func.name.unambiguous_name()}::call({', '.join(functional_exprs)});
}}
{wrap_propagate_mutations_and_return(f, g.functional, 'tmp_output')}
}}
}}"""
# The below functions generate RegisterFunctionalization.cpp
# These files provide the kernels that run the functionalization pass, which can be opted into
# per backend (e.g. XLA or Vulkan), or as a composable transform (functionalize() in functorch).
# See Note [Functionalization Pass: View Inverses].
def gen_functionalization_view_inverse_declaration(
selector: SelectiveBuilder, g: NativeFunctionsViewGroup
) -> Optional[str]:
# For every (non-composite) view op, we need a corresponding "inverse view" function.
# This generates the declarations so we get a good compiler error when someone adds a new view.
@with_native_function
def emit_decl_helper(g: NativeFunctionsViewGroup) -> Optional[str]:
if g.view.has_composite_implicit_autograd_kernel:
return None
view_copy_inverse_sig = ViewInverseSignature(g)
return view_copy_inverse_sig.decl()
return emit_decl_helper(g)
def gen_functionalization_registration(
selector: SelectiveBuilder,
g: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup],
composite_implicit_autograd_index: BackendIndex,
) -> List[str]:
@with_native_function
def emit_registration_helper(f: NativeFunction) -> str:
if f.has_composite_implicit_autograd_kernel:
metadata = composite_implicit_autograd_index.get_kernel(f)
assert metadata is not None
native_api_name = metadata.kernel
sig = DispatcherSignature.from_schema(f.func)
# Note [Composite view ops in the functionalization pass]
# We don't need to worry about implementing functionalization kernels for views with
# CompositeImplicitAutograd kernels, because we can just decompose them into their base operators.
# We can't just opt the entire Functionalization dispatch key into the composite keyset though,
# because we don't want to decompose non-view ops that are composite, like `at::ones`.
registration_str = (
f"static_cast<{sig.ptr_type()}>(at::native::{native_api_name})"
)
else:
# non-composite view ops (and inplace ops) get a normal registration.
registration_str = f"TORCH_FN(functionalization::{wrapper_name(f.func)})"
return f'm.impl("{f.func.name}", {registration_str});'
# Don't generate kernels in mobile build
if not selector.include_all_operators:
return []
if isinstance(g, NativeFunctionsViewGroup):
# functionalization needs to register kernels for view + view_inplace ops
# See Note [Functionalization <> torch.Tensor constructor]
if str(g.view.func.name) == "lift_fresh":
return []
view_str = [emit_registration_helper(g.view)]
if g.view_inplace is not None:
assert g.view_inplace.is_view_op
view_str.append(emit_registration_helper(g.view_inplace))
return view_str
elif isinstance(g, NativeFunctionsGroup):
fns = list(g.functions())
else:
if str(g.func.name) in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION:
return []
fns = [g]
registrations = []
for f in fns:
if str(f.func.name) == "lift":
# See Note [Functionalization <> torch.Tensor constructor]
return []
if str(f.func.name) == "resize_":
# See Note [resize_ in Functionalization]
return []
assert not f.is_view_op
# functionalization needs to generate and register kernels for inplace ops.
# We *also* need to directly register CompositeImplicitAutograd kernels
# so that they decompose properly before functionalization.
if modifies_arguments(f) or f.has_composite_implicit_autograd_kernel:
registrations.append(emit_registration_helper(f))
return registrations
def gen_functionalization_definition(
selector: SelectiveBuilder,
# Note: Ideally this code should never have to look at NativeFunction
# (and instead only need to operate on grouped NativeFunctions).
# The only reason currently is because we need to emit direct dispatch registrations
# for CompositeImplicitAutograd operators, which are potentially ungrouped.
g: Union[NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup],
) -> List[str]:
# Don't generate kernels in mobile build
if not selector.include_all_operators:
return []
if isinstance(g, NativeFunctionsViewGroup):
# Case 1: emit view -> view_copy kernels for the functionalization pass
view_defs = []
if not g.composite:
# invariant: NativeFunctionsViewGroup's always have a view_copy operator
# if the view is not composite (implicit autograd)
assert g.view_copy is not None
view_defs.append(emit_view_functionalization_body(g, view_inplace=False))
if g.view_inplace is not None:
view_defs.append(emit_view_functionalization_body(g, view_inplace=True))
return view_defs
elif isinstance(g, NativeFunction):
# Invariant: all mutable operators that we need to handle in functionalization
# should have been properly grouped up.
# TODO: The below ops all have "problematic" schemas that prevent them from
# getting functionalized. Instead of bending over backwards to get things to work,
# I think we should either:
# (1) fix their schemas (BC-breaking)
# (2) hand-write their functionalization kernels
if str(g.func.name) not in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION:
assert g.has_composite_implicit_autograd_kernel or not modifies_arguments(g)
return []
else:
# Case 2: emit inplace -> out-of-place kernels for the functionalization pass
mutation_defs = []
mutation_defs.append(emit_inplace_functionalization_body(g.out, g))
if g.inplace is not None:
mutation_defs.append(emit_inplace_functionalization_body(g.inplace, g))
if g.mutable is not None:
mutation_defs.append(emit_inplace_functionalization_body(g.mutable, g))
return mutation_defs
return []
| pytorch-master | torchgen/gen_functionalization_type.py |
import re
from typing import Mapping, Match, Optional, Sequence
# Match $identifier or ${identifier} and replace it with the value in env.
# If the identifier is preceded only by whitespace on its line and its value
# is a list, this is treated as a block substitution: each element of the
# list is placed on its own line, indented to the identifier's depth.
# If the identifier appears after non-whitespace on its line and its value is
# a list, the elements are comma separated; ${,foo} inserts a comma before
# the list (when the list is non-empty) and ${foo,} inserts one after.
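# Illustrative examples (editor's addition, assuming the behavior described
# above and implemented by the class below):
#   CodeTemplate("f(self${,args})").substitute(args=["x", "y"])
#     -> 'f(self, x, y)'
#   CodeTemplate("void g() {\n  $body\n}").substitute(body=["a();", "b();"])
#     -> 'void g() {\n  a();\n  b();\n}'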
class CodeTemplate:
substitution_str = r"(^[^\n\S]*)?\$([^\d\W]\w*|\{,?[^\d\W]\w*\,?})"
substitution = re.compile(substitution_str, re.MULTILINE)
pattern: str
filename: str
@staticmethod
def from_file(filename: str) -> "CodeTemplate":
with open(filename, "r") as f:
return CodeTemplate(f.read(), filename)
def __init__(self, pattern: str, filename: str = "") -> None:
self.pattern = pattern
self.filename = filename
def substitute(
self, env: Optional[Mapping[str, object]] = None, **kwargs: object
) -> str:
if env is None:
env = {}
def lookup(v: str) -> object:
assert env is not None
return kwargs[v] if v in kwargs else env[v]
def indent_lines(indent: str, v: Sequence[object]) -> str:
return "".join(
[indent + l + "\n" for e in v for l in str(e).splitlines()]
).rstrip()
def replace(match: Match[str]) -> str:
indent = match.group(1)
key = match.group(2)
comma_before = ""
comma_after = ""
if key[0] == "{":
key = key[1:-1]
if key[0] == ",":
comma_before = ", "
key = key[1:]
if key[-1] == ",":
comma_after = ", "
key = key[:-1]
v = lookup(key)
if indent is not None:
if not isinstance(v, list):
v = [v]
return indent_lines(indent, v)
elif isinstance(v, list):
middle = ", ".join([str(x) for x in v])
if len(v) == 0:
return middle
return comma_before + middle + comma_after
else:
return str(v)
return self.substitution.sub(replace, self.pattern)
if __name__ == "__main__":
c = CodeTemplate(
"""\
int foo($args) {
$bar
$bar
$a+$b
}
int commatest(int a${,stuff})
int notest(int a${,empty,})
"""
)
print(
c.substitute(
args=["hi", 8],
bar=["what", 7],
a=3,
b=4,
stuff=["things...", "others"],
empty=[],
)
)
| pytorch-master | torchgen/code_template.py |
"""torchgen
This module contains code generation utilities for PyTorch. It is used to
build PyTorch from source, but may also be used for out-of-tree projects
that extend PyTorch.
Note well that we provide no BC guarantees for torchgen. If you're interested
in using torchgen and want the PyTorch team to be aware, please reach out
on GitHub.
"""
| pytorch-master | torchgen/__init__.py |
import dataclasses
import itertools
import re
from dataclasses import dataclass
from enum import auto, Enum
from typing import Callable, Dict, Iterator, List, Optional, Sequence, Set, Tuple, Union
from torchgen.utils import assert_never, NamespaceHelper, OrderedSet
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# DATA MODEL
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Some general principles for our data model.
#
# - Stop using C++ data types as the internal data representation
# format. Instead, the internal data structures are centered
#   around JIT schema representation. This avoids a big problem
# with the old codegen where we read in all the types from
# native_functions.yaml and then immediately had to retranslate
# them into C++ types.
#
# - More semantic data representation. Instead of representing
# everything as dicts and strings, we define dataclasses for
# every interesting entity the code generation has to deal with.
# These dataclasses have strong semantic invariants: for example,
# we generally require them to roundtrip losslessly into the
# form they were parsed from. These structures are immutable
# and you're expected to populate information once during
# construction.
# Represent a source location; used for better error reporting
@dataclass(frozen=True)
class Location:
file: str
line: int
def __str__(self) -> str:
return "{}:{}".format(self.file, self.line)
# Valid values of the 'variants' field in native_functions.yaml
Variant = Enum("Variant", ("function", "method"))
# Default kernel namespace
DEFAULT_KERNEL_NAMESPACE = "at::native"
# NOTE: Keep the list in sync with `DispatchKey` in c10/core/DispatchKey.h
BACKEND_COMPONENTS = "CPU CUDA HIP XLA MPS IPU XPU HPU VE Lazy Meta PrivateUse1 PrivateUse2 PrivateUse3".split()
FUNCTIONALITY_KEYS = ["", "Quantized", "Sparse", "NestedTensor", "Autograd"]
# This list guards dispatches that can be used in derivatives.yaml
# For now we omit AutogradFunctionality and AutogradOther
AUTOGRAD_KEYS = ["AutogradNestedTensor"] + [
"Autograd" + component for component in BACKEND_COMPONENTS
]
# This doesn't have to be in sync with the header, it only needs to contain
# entries that we actually use in the codegen
class DispatchKey(Enum):
Undefined = 0
CatchAll = Undefined
FPGA = auto()
ORT = auto()
Vulkan = auto()
Metal = auto()
MKLDNN = auto()
OpenGL = auto()
OpenCL = auto()
IDEEP = auto()
CustomRNGKeyId = auto()
MkldnnCPU = auto()
Sparse = auto()
SparseCsrCPU = auto()
SparseCsrCUDA = auto()
ZeroTensor = auto()
BackendSelect = auto()
Named = auto()
AutogradOther = auto()
AutogradFunctionality = auto()
AutogradNestedTensor = auto()
Tracer = auto()
Autocast = auto()
Batched = auto()
VmapMode = auto()
TESTING_ONLY_GenericWrapper = auto()
TESTING_ONLY_GenericMode = auto()
Autograd = auto()
CompositeImplicitAutograd = auto()
CompositeExplicitAutograd = auto()
CompositeExplicitAutogradNonFunctional = auto()
# BEGIN autogenerated
CPU = auto()
CUDA = auto()
HIP = auto()
XLA = auto()
MPS = auto()
IPU = auto()
XPU = auto()
HPU = auto()
VE = auto()
Lazy = auto()
Meta = auto()
PrivateUse1 = auto()
PrivateUse2 = auto()
PrivateUse3 = auto()
QuantizedCPU = auto()
QuantizedCUDA = auto()
QuantizedHIP = auto()
QuantizedXLA = auto()
QuantizedMPS = auto()
QuantizedIPU = auto()
QuantizedXPU = auto()
QuantizedHPU = auto()
QuantizedVE = auto()
QuantizedLazy = auto()
QuantizedMeta = auto()
QuantizedPrivateUse1 = auto()
QuantizedPrivateUse2 = auto()
QuantizedPrivateUse3 = auto()
SparseCPU = auto()
SparseCUDA = auto()
SparseHIP = auto()
SparseXLA = auto()
SparseMPS = auto()
SparseIPU = auto()
SparseXPU = auto()
SparseHPU = auto()
SparseVE = auto()
SparseLazy = auto()
SparseMeta = auto()
SparsePrivateUse1 = auto()
SparsePrivateUse2 = auto()
SparsePrivateUse3 = auto()
NestedTensorCPU = auto()
NestedTensorCUDA = auto()
NestedTensorHIP = auto()
NestedTensorXLA = auto()
NestedTensorMPS = auto()
NestedTensorIPU = auto()
NestedTensorXPU = auto()
NestedTensorHPU = auto()
NestedTensorVE = auto()
NestedTensorLazy = auto()
NestedTensorMeta = auto()
NestedTensorPrivateUse1 = auto()
NestedTensorPrivateUse2 = auto()
NestedTensorPrivateUse3 = auto()
AutogradCPU = auto()
AutogradCUDA = auto()
AutogradHIP = auto()
AutogradXLA = auto()
AutogradMPS = auto()
AutogradIPU = auto()
AutogradXPU = auto()
AutogradHPU = auto()
AutogradVE = auto()
AutogradLazy = auto()
AutogradMeta = auto()
AutogradPrivateUse1 = auto()
AutogradPrivateUse2 = auto()
AutogradPrivateUse3 = auto()
# END autogenerated
def __str__(self) -> str:
return self.name
def lower(self) -> str:
return str(self).lower()
@staticmethod
def parse(value: str) -> "DispatchKey":
for k, v in DispatchKey.__members__.items():
if k == value:
return v
raise AssertionError(f"unknown dispatch key {value}")
def codegen_per_backend_entries() -> str:
r = []
for fk in FUNCTIONALITY_KEYS:
for bc in BACKEND_COMPONENTS:
r.append(f" {fk}{bc} = auto()")
return "\n".join(r)
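# Editor's note (illustrative): the helper above regenerates the per-backend
# entries (e.g. SparseCPU, AutogradCUDA, QuantizedMeta) so the check loop below
# can report exactly which autogenerated members the DispatchKey enum is missing.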
for fk in FUNCTIONALITY_KEYS:
for bc in BACKEND_COMPONENTS:
if not hasattr(DispatchKey, fk + bc):
r = codegen_per_backend_entries()
print(r)
raise RuntimeError(
f"Missing {fk}{bc} from DispatchKey enum. Here is the autogenerated list we expect to have:\n\n{r}"
)
STRUCTURED_DISPATCH_KEYS = {DispatchKey.MPS, DispatchKey.CUDA, DispatchKey.CPU}
UFUNC_DISPATCH_KEYS = {DispatchKey.CUDA, DispatchKey.CPU}
# Set of supported dispatch keys
dispatch_keys = [
DispatchKey.CPU,
DispatchKey.SparseCPU,
DispatchKey.SparseCsrCPU,
DispatchKey.MkldnnCPU,
DispatchKey.CUDA,
DispatchKey.MPS,
DispatchKey.SparseCUDA,
DispatchKey.SparseCsrCUDA,
DispatchKey.QuantizedCPU,
DispatchKey.QuantizedCUDA,
DispatchKey.CompositeImplicitAutograd,
DispatchKey.CompositeExplicitAutograd,
DispatchKey.CompositeExplicitAutogradNonFunctional,
DispatchKey.NestedTensorCPU,
DispatchKey.NestedTensorCUDA,
# Meta is a magic key: it is automatically generated for structured
# kernels
DispatchKey.Meta,
DispatchKey.SparseMeta,
DispatchKey.QuantizedMeta,
DispatchKey.NestedTensorMeta,
DispatchKey.ZeroTensor,
]
# Dispatch keys that "support all backends". These codegen slightly differently
# than backend-specific keys.
def is_generic_dispatch_key(dk: DispatchKey) -> bool:
return dk in {
DispatchKey.CompositeExplicitAutograd,
DispatchKey.CompositeExplicitAutogradNonFunctional,
DispatchKey.CompositeImplicitAutograd,
}
# CUDA specific dispatch keys
def is_cuda_dispatch_key(dk: DispatchKey) -> bool:
return dk in {
DispatchKey.CUDA,
DispatchKey.QuantizedCUDA,
DispatchKey.SparseCUDA,
DispatchKey.SparseCsrCUDA,
DispatchKey.NestedTensorCUDA,
DispatchKey.AutogradCUDA,
}
# Structured kernel generation is only supported for certain key types;
# otherwise use old-style
def is_structured_dispatch_key(dk: DispatchKey) -> bool:
return dk in STRUCTURED_DISPATCH_KEYS
def is_ufunc_dispatch_key(dk: DispatchKey) -> bool:
# For now, ufunc dispatch keys coincide with structured keys
return dk in UFUNC_DISPATCH_KEYS
# This is oddly named ScalarType and not DType for symmetry with C++
class ScalarType(Enum):
Byte = auto()
Char = auto()
Short = auto()
Int = auto()
Long = auto()
Half = auto()
Float = auto()
Double = auto()
ComplexHalf = auto()
ComplexFloat = auto()
ComplexDouble = auto()
Bool = auto()
BFloat16 = auto()
def __str__(self) -> str:
return self.name
@staticmethod
def maybe_parse(value: str) -> Optional["ScalarType"]:
for k, v in ScalarType.__members__.items():
if k == value:
return v
return None
@staticmethod
def parse(value: str) -> "ScalarType":
mb_r = ScalarType.maybe_parse(value)
assert mb_r is not None, f"unknown dtype {value}"
return mb_r
@staticmethod
def parse_set(values: str) -> OrderedSet["ScalarType"]:
dtypes: OrderedSet[ScalarType] = OrderedSet()
for value in values.split(", "):
if value in DTYPE_CLASSES:
dtypes.update(DTYPE_CLASSES[value])
else:
dtypes.add(ScalarType.parse(value))
return dtypes
DTYPE_CLASSES: Dict[str, OrderedSet[ScalarType]] = {}
# NB: Integral doesn't include boolean
DTYPE_CLASSES["Integral"] = OrderedSet(
[
ScalarType.Byte,
ScalarType.Char,
ScalarType.Int,
ScalarType.Long,
ScalarType.Short,
]
)
# NB: Floating doesn't include low precision types
DTYPE_CLASSES["Floating"] = OrderedSet([ScalarType.Float, ScalarType.Double])
DTYPE_CLASSES["Complex"] = OrderedSet(
[ScalarType.ComplexFloat, ScalarType.ComplexDouble]
)
DTYPE_CLASSES["All"] = DTYPE_CLASSES["Integral"] | DTYPE_CLASSES["Floating"]
DTYPE_CLASSES["AllAndComplex"] = DTYPE_CLASSES["All"] | DTYPE_CLASSES["Complex"]
DTYPE_CLASSES["FloatingAndComplex"] = (
DTYPE_CLASSES["Floating"] | DTYPE_CLASSES["Complex"]
)
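# Illustrative example (editor's addition): ScalarType.parse_set expands both
# dtype class names and individual dtype names, so
#   ScalarType.parse_set("Floating, Half")
# yields an OrderedSet containing ScalarType.Float, ScalarType.Double and
# ScalarType.Half.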
# Represents the valid entries for ufunc_inner_loop in native_functions.yaml.
# NB: if you add a new UfuncKey, you will teach torchgen.dest.ufunc how
# to process it. Most logic will ignore keys they don't understand, so your
# new key will get silently ignored until you hook in logic to deal with it.
class UfuncKey(Enum):
# These are low level keys that represent exactly one particular
# instantiation of the kernel produced by codegen
CUDAFunctor = auto()
CUDAFunctorOnOther = auto()
CUDAFunctorOnSelf = auto()
CPUScalar = auto()
CPUVector = auto()
# These are the ones users will usually specify, and
# implicitly "fill in" the low level keys
ScalarOnly = auto() # CUDA*, CPUScalar
Generic = auto() # CUDA*, CPU*
def __str__(self) -> str:
return self.name
@staticmethod
def parse(value: str) -> "UfuncKey":
for k, v in UfuncKey.__members__.items():
if k == value:
return v
raise AssertionError(f"unknown ufunc key {value}")
class DeviceCheckType(Enum):
NoCheck = 0
ExactSame = 1
ViewSchemaKind = Enum(
"ViewSchemaKind", ("aliasing", "aliasing_inplace", "non_aliasing")
)
# The basic input to the code generation is native_functions.yaml.
# The name "native", BTW, comes from the distinction between native
# functions and legacy TH functions. The legacy TH functions are gone,
# but the "native" descriptor has stuck.
#
# NativeFunction models a single entry in native_functions.yaml. Its
# fields roughly correspond to what you would see in the YAML itself,
# but after canonicalization and parsing has occurred.
#
# You can see some of the overall design patterns for how we setup
# dataclasses in this class, but we will defer a complete discussion
# of this at FunctionSchema.
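# For orientation, a native_functions.yaml entry consumed by
# NativeFunction.from_yaml below looks roughly like the following (abbreviated,
# illustrative example added by the editor; the operator and kernel names here
# are hypothetical, but 'func', 'variants' and 'dispatch' are real fields):
#
#   - func: my_op.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
#     variants: function, method
#     dispatch:
#       CPU, CUDA: my_op_kernel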
@dataclass(frozen=True)
class NativeFunction:
# The namespace for this operator. For example, if we have "at::add"
# then the namespace would be "at". This enables ops to be registered
# through the same DSL with a custom namespace. If not specified, the
# default namespace would be "at".
namespace: str
# The function schema of the operator in question. This schema
# has been parsed; see FunctionSchema for more about its structure.
# (This type is quoted as we are forward referencing a type
# defined later in the file. I opted for this ordering of the
# classes for expository clarity.)
func: "FunctionSchema"
# Whether or not to generate mutable tensor arguments like regular
# ones
use_const_ref_for_mutable_tensors: bool
# Whether or not to omit automatic generation of a DeviceGuard
device_guard: bool
# How to emit automatic generation of device check
device_check: DeviceCheckType
# What python module to put the function in
python_module: Optional[str]
# TODO: figure out what this does
category_override: Optional[str]
# If no variants are specified in native_functions.yaml, this is
# assumed to be {'function'}.
variants: Set[Variant]
# Whether or not we should skip generating registrations for
# this kernel. This is a bit of a double-edged sword, as manual
# registrations don't participate in codegen-based selective build!
manual_kernel_registration: bool
# Whether or not to skip generating TensorMethod/Functions bindings
# for this kernel. Technically, this doesn't actually skip generating
# the binding; instead, the binding gets generated to __dispatch_{funcname}
# so you can make use of the normal binding if you need it.
manual_cpp_binding: bool
    # The location in the YAML file where this native function entry was
# defined. This is for conveniently reporting error messages!
loc: "Location"
# A list of operators that are expected to be auto-generated for this NativeFunction.
# Note: This list isn't actually directly used by the codegen to generate anything.
# Instead, the codegen figures out what operators to generate purely based off of
# function schema, and uses the autogen declarations to error check.
# We expect every NativeFunction that gets auto-generated be explicitly called out
# in native_functions.yaml
autogen: List["OperatorName"]
# If non-empty, this kernel is subject to ufunc codegen.
# Sorted by ufunc_key
ufunc_inner_loop: Dict[UfuncKey, "UfuncInnerLoop"]
    # Whether or not this out function is a "structured kernel". Structured
# kernels are defined a little differently from normal kernels; in
# particular, their shape checking logic is defined separately from
# the kernel. Only out functions can be structured; other functions
# delegate to the out function using the structured_delegate keyword.
# Every structured kernel must have at least an out and a functional
# variant.
structured: bool
# Whether or not this non-out function is a structured kernel, defined
# in terms of the out kernel referenced by the string here.
structured_delegate: Optional["OperatorName"]
# Only valid for structured kernels. Specifies alternative of what
# to inherit from when defining the meta class for the structured
# operator. This will usually be TensorIteratorBase. This also
# changes the semantics of set_output to call the parent class.
structured_inherits: Optional[str]
# Structured kernels can declare elements as "precomputed". These elements
# are returned by the meta function in one struct and passed to the impl
# function in lieu of certain kernel arguments that these precomputed
# elements supersede. Information about the names and types of these
# precomputed elements and how they correspond to kernel arguments is stored
# in this member, if applicable.
precomputed: Optional["Precompute"]
# Argument names whose default should be excluded from the C++ interface.
# Intended for resolving overload ambiguities between signatures.
cpp_no_default_args: Set[str]
# Note [Abstract ATen methods]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# An abstract ATen method is one whose dispatch differs between
# types. These are implemented in derived types (with a
# standard (throwing) definition in Type). A concrete ATen
# method is one which has the same dispatch for all types;
# we just implement it in the base Type. This is exposed
# in Declarations.yaml via a field named 'abstract'.
is_abstract: bool
# Whether or not the NativeFunction contains a backend-agnostic kernel
has_composite_implicit_autograd_kernel: bool
has_composite_explicit_autograd_kernel: bool
has_composite_explicit_autograd_non_functional_kernel: bool
    # Tags are used to describe semantic information about (groups of) operators
    # that isn't easily inferable directly from the operator's schema.
tags: Set[str]
# NB: The benefit of defining a dataclass is that we automatically get
# a constructor defined for all the fields we specify. No need
# to explicitly write it out.
    # We parse both the NativeFunction + backend-specific information about it, which is stored in a corresponding BackendIndex.
@staticmethod
def from_yaml(
ei: Dict[str, object],
loc: "Location",
valid_tags: Set[str],
ignore_keys: Optional[Set[DispatchKey]] = None,
) -> Tuple[
"NativeFunction", Dict[DispatchKey, Dict["OperatorName", "BackendMetadata"]]
]:
"""
Parse a NativeFunction from a dictionary as directly parsed
from native_functions.yaml
"""
e = ei.copy()
funcs = e.pop("func")
assert isinstance(funcs, str), f"not a str: {funcs}"
# only support one level of namespace. E.g., aten::add
namespace_helper = NamespaceHelper.from_namespaced_entity(
namespaced_entity=funcs, max_level=1
)
namespace = namespace_helper.get_cpp_namespace(default="aten")
func = FunctionSchema.parse(namespace_helper.entity_name)
cpp_no_default_args_list = e.pop("cpp_no_default_args", [])
assert isinstance(cpp_no_default_args_list, list)
cpp_no_default_args = set(cpp_no_default_args_list)
use_const_ref_for_mutable_tensors = e.pop(
"use_const_ref_for_mutable_tensors", False
)
assert isinstance(use_const_ref_for_mutable_tensors, bool)
variants_s = e.pop("variants", "function")
assert isinstance(variants_s, str)
variants: Set[Variant] = set()
for v in variants_s.split(", "):
if v == "function":
variants.add(Variant.function)
elif v == "method":
variants.add(Variant.method)
else:
raise AssertionError(f"illegal variant {v}")
manual_kernel_registration = e.pop("manual_kernel_registration", False)
assert isinstance(
manual_kernel_registration, bool
), f"not a bool: {manual_kernel_registration}"
manual_cpp_binding = e.pop("manual_cpp_binding", False)
assert isinstance(manual_cpp_binding, bool), f"not a bool: {manual_cpp_binding}"
device_guard = e.pop("device_guard", True)
assert isinstance(device_guard, bool), f"not a bool: {device_guard}"
device_check_s = e.pop("device_check", None)
assert device_check_s is None or isinstance(
device_check_s, str
), f"not a str: {device_check_s}"
device_check: DeviceCheckType
if device_check_s is None:
device_check = DeviceCheckType.ExactSame
else:
device_check = DeviceCheckType[device_check_s]
structured = e.pop("structured", False)
assert isinstance(structured, bool), f"not a bool: {structured}"
structured_delegate_s = e.pop("structured_delegate", None)
assert structured_delegate_s is None or isinstance(
structured_delegate_s, str
), f"not a str: {structured_delegate_s}"
assert structured_delegate_s is None or "::" not in structured_delegate_s, (
"namespace is not supported in structured delegate,"
" using the same namespace as the native function"
)
structured_delegate: Optional[OperatorName] = None
if structured_delegate_s is not None:
structured_delegate = OperatorName.parse(structured_delegate_s)
structured_inherits = e.pop("structured_inherits", None)
assert structured_inherits is None or isinstance(
structured_inherits, str
), f"not a str: {structured_inherits}"
assert structured_inherits is None or "::" not in structured_inherits, (
"namespace is not supported in structured inherits,"
" using the same namespace as the native function"
)
python_module = e.pop("python_module", None)
assert python_module is None or isinstance(
python_module, str
), f"not a str: {python_module}"
assert (
python_module is None or Variant.method not in variants
), "functions in modules cannot be methods"
category_override = e.pop("category_override", None)
assert category_override is None or isinstance(
category_override, str
), f"not a str: {category_override}"
precomputed_dict = e.pop("precomputed", None)
assert precomputed_dict is None or structured is True
precomputed = Precompute.parse(precomputed_dict) if precomputed_dict else None
tags_s = e.pop("tags", "")
assert isinstance(tags_s, str)
tags: Set[str] = set()
if len(tags_s) > 0:
assert len(valid_tags) > 0
for t in tags_s.split(", "):
# TODO: verify that the tag is valid and has an entry in tags.yaml
if t in valid_tags:
tags.add(t)
else:
raise AssertionError(f"illegal tag {t}")
assert isinstance(tags, set)
from torchgen.api import cpp
raw_dispatch = e.pop("dispatch", None)
assert raw_dispatch is None or isinstance(raw_dispatch, dict), e
dispatch: Dict[DispatchKey, BackendMetadata] = {}
if raw_dispatch is not None:
assert not manual_kernel_registration, (
"cannot specify both manual_kernel_registration and dispatch; with "
"manual registration, dispatch has no effect!"
)
redundant_composite_implicit_autograd = False
for ks, v in raw_dispatch.items():
if ks == "__line__":
continue # not worth tracking line numbers for dispatch entries
assert isinstance(ks, str), e
for k in ks.split(","):
dispatch_key = DispatchKey.parse(k.strip())
if ignore_keys and dispatch_key in ignore_keys:
continue
assert dispatch_key in dispatch_keys, (
f"Dispatch key {dispatch_key} of kernel {v} "
"is not a supported dispatch key."
)
# We only allow at most 2 levels of namespace for kernels.
# We will append "native" to a custom kernel namespace.
namespace_helper = NamespaceHelper.from_namespaced_entity(
v, max_level=2
)
kernel_namespace = namespace_helper.get_cpp_namespace(default="at")
# Why is 'structured' included? External backends (e.g.
# XLA) opt into which ops are structured independently
# of which in-tree ops are structured
dispatch[dispatch_key] = BackendMetadata(
kernel=namespace_helper.entity_name,
structured=structured
and is_structured_dispatch_key(dispatch_key),
cpp_namespace=(kernel_namespace + "::native"),
)
if (
dispatch_key is DispatchKey.CompositeImplicitAutograd
and v == cpp.name(func)
):
redundant_composite_implicit_autograd = True
assert not (len(dispatch) == 1 and redundant_composite_implicit_autograd), (
"unnecessary dispatch table for this function; just delete the dispatch "
"key entirely"
)
# if a function is a structured delegate, deleting the dispatch
# table is NOT semantics preserving
assert structured_delegate or dispatch.keys() != {
DispatchKey.CompositeImplicitAutograd
}, (
f"unexpected name for singleton CompositeImplicitAutograd dispatch entry: expected {cpp.name(func)} "
f"but got {dispatch[DispatchKey.CompositeImplicitAutograd]}. Rename your implementation to the expected "
"name, then delete the dispatch table"
)
elif not structured and structured_delegate is None:
name = str(func.name.name)
assert not (
name.startswith("new_")
or name.endswith("_like")
# TODO: maybe it's better to test the return
or (
func.arguments.tensor_options
and not func.arguments.has_tensor_arg()
)
), (
f"expected {name} to have a CompositeExplicitAutograd "
"dispatch entry, but there was no dispatch table. Factory functions "
"should not have implicit dispatch as they should not be decomposed "
"for __torch_dispatch__"
)
dispatch[DispatchKey.CompositeImplicitAutograd] = BackendMetadata(
cpp.name(func), structured=False, cpp_namespace=DEFAULT_KERNEL_NAMESPACE
)
composites_in_dispatch = [
d
for d in dispatch
if d == DispatchKey.CompositeExplicitAutograd
or d == DispatchKey.CompositeExplicitAutogradNonFunctional
or d == DispatchKey.CompositeImplicitAutograd
]
assert len(composites_in_dispatch) <= 1, (
"cannot specify more than one of CompositeExplicitAutograd, CompositeExplicitAutogradNonFunctional, "
"or CompositeImplicitAutograd on a single kernel; each "
"strictly subsumes the other. If you wanted to provide an explicit autograd "
"implementation, specify CompositeExplicitAutograd; otherwise specify CompositeImplicitAutograd only"
)
autogen_str = e.pop("autogen", "")
assert isinstance(autogen_str, str)
autogen = (
[]
if autogen_str == ""
else [OperatorName.parse(x) for x in autogen_str.split(", ")]
)
raw_ufunc_inner_loop = e.pop("ufunc_inner_loop", {})
ufunc_inner_loop = {}
if isinstance(raw_ufunc_inner_loop, str):
ufunc_inner_loop[UfuncKey.Generic] = UfuncInnerLoop.parse(
raw_ufunc_inner_loop, UfuncKey.Generic
)
elif isinstance(raw_ufunc_inner_loop, dict):
for k, vo in raw_ufunc_inner_loop.items():
if k == "__line__":
continue
assert isinstance(k, str), f"ufunc_inner_loop key is not a str: {k}"
                assert isinstance(vo, str), f"ufunc_inner_loop value is not a str: {vo}"
ufunc_key = UfuncKey.parse(k)
ufunc_inner_loop[ufunc_key] = UfuncInnerLoop.parse(vo, ufunc_key)
else:
raise AssertionError(
f"ufunc_inner_loop not str or dict: {raw_ufunc_inner_loop}"
)
# Program the BackendIndex for the implicit dispatch entry from ufunc
if ufunc_inner_loop:
assert structured, "ufunc must be structured"
# Delay import ufunc here to avoid circular import issue
# See: https://github.com/pytorch/pytorch/issues/81294
import torchgen.api.ufunc as ufunc
for dispatch_key in UFUNC_DISPATCH_KEYS:
assert (
dispatch_key not in dispatch
), f"ufunc should not have explicit dispatch entry for {dispatch_key}"
dispatch[dispatch_key] = BackendMetadata(
kernel=ufunc.schema_kernel_name(func, dispatch_key),
structured=True,
cpp_namespace=DEFAULT_KERNEL_NAMESPACE,
)
if structured_delegate:
# Structured functions MUST have a dispatch table
is_abstract = True
else:
is_abstract = dispatch.keys() != {DispatchKey.CompositeImplicitAutograd}
has_composite_implicit_autograd_kernel = (
DispatchKey.CompositeImplicitAutograd in dispatch.keys()
)
has_composite_explicit_autograd_kernel = (
DispatchKey.CompositeExplicitAutograd in dispatch.keys()
)
has_composite_explicit_autograd_non_functional_kernel = (
DispatchKey.CompositeExplicitAutogradNonFunctional in dispatch.keys()
)
# We aren't going to store dispatch metadata inline in NativeFunctions;
# instead it is separately indexed by backend (so other backends can
# add more dispatch entries after the fact). Reindex the individual
# metadata by OperatorName!
backend_metadata = {k: {func.name: v} for k, v in dispatch.items()}
# don't care if it exists or not; make it easier to use this function
# with other yaml parsers that aren't setting __line__ in the dict
e.pop("__line__", None)
assert not e, f"leftover entries: {e}"
# Asserts that we can't do in post_init, because they rely on backend-specific info
if structured_delegate is not None:
for key in STRUCTURED_DISPATCH_KEYS:
assert key not in dispatch, (
f"if structured_delegate, then must not have {key} in dispatch dictionary "
"(it is delegated!)"
)
return (
NativeFunction(
func=func,
use_const_ref_for_mutable_tensors=use_const_ref_for_mutable_tensors,
variants=variants,
structured=structured,
structured_delegate=structured_delegate,
structured_inherits=structured_inherits,
precomputed=precomputed,
autogen=autogen,
ufunc_inner_loop=ufunc_inner_loop,
manual_kernel_registration=manual_kernel_registration,
manual_cpp_binding=manual_cpp_binding,
python_module=python_module,
category_override=category_override,
device_guard=device_guard,
device_check=device_check,
loc=loc,
cpp_no_default_args=cpp_no_default_args,
is_abstract=is_abstract,
has_composite_implicit_autograd_kernel=has_composite_implicit_autograd_kernel,
has_composite_explicit_autograd_kernel=has_composite_explicit_autograd_kernel,
has_composite_explicit_autograd_non_functional_kernel=has_composite_explicit_autograd_non_functional_kernel,
tags=tags,
namespace=namespace,
),
backend_metadata,
)
def symints_to_ints(self) -> "NativeFunction":
return dataclasses.replace(self, func=self.func.symints_to_ints())
def validate_unstructured(self) -> None:
# TODO: probably better to accumulate these errors and report them all
# at once
assert not self.structured, (
"This function is structured, but there was "
"no valid functional variant of it."
)
assert self.structured_delegate, (
"This function delegates to another structured out function, "
"but no valid function was found (the delegate may not exist, or it has the wrong type)"
)
# __post_init__ functions in dataclasses can be used to do extra
# validation after construction.
#
# Notice that we don't do any type validation here. In fact, we
# rely exclusively on mypy to check if you've done types correctly!
# Validation is for nontrivial invariants that cannot be (conveniently)
# encoded in the type system.
def __post_init__(self) -> None:
if self.func.arguments.out:
assert self.variants == {Variant.function}, (
"Native functions with out arguments MUST "
"be declared with only function variant; e.g., variants: function; "
"otherwise you will tickle a Python argument binding bug "
"(which usually manifests itself as the result variable being undefined.)"
)
if self.structured:
assert self.func.kind() == SchemaKind.out, (
"Put structured field on the out= "
"variant of a function; did you mean structured_delegate?"
)
assert (
self.device_guard
), "device_guard: False is not respected by structured kernels"
if self.structured_delegate:
assert self.func.kind() != SchemaKind.out, (
"structured_delegate field not allowed "
"on out= functions; did you mean structured?"
)
assert (
self.device_guard
), "device_guard: False is not respected by structured kernels"
# Technically, with the asserts above, this assert is impossible to
# happen
assert not (
self.structured and self.structured_delegate
), "Cannot have both structured and structured_delegate on function"
defaulted_arguments = {
a.name for a in self.func.schema_order_arguments() if a.default is not None
}
invalid_args = set.difference(self.cpp_no_default_args, defaulted_arguments)
assert len(invalid_args) == 0, f"Invalid cpp_no_default_args: {invalid_args}"
if self.structured_inherits is not None:
assert (
self.structured
), "structured_inherits must also imply structured: True"
if str(self.func.name).startswith("_foreach"):
            assert self.device_check == DeviceCheckType.NoCheck, (
                "foreach kernels fall back to the slow path when tensors are on different devices, "
                "device_check not allowed to be enabled"
)
@property
def has_composite_kernel(self) -> bool:
return (
self.has_composite_implicit_autograd_kernel
or self.has_composite_explicit_autograd_kernel
or self.has_composite_explicit_autograd_non_functional_kernel
)
@property
def is_view_op(self) -> bool:
rets = self.func.returns
is_non_mutating_view = len(rets) > 0 and any(
r.annotation is not None and not r.annotation.is_write for r in rets
)
        # See Note [resize_ in Functionalization] for more details
is_inplace_view = (
"inplace_view" in self.tags and str(self.func.name) != "resize_"
)
is_wildcard_view = any(
inp.annotation is not None and inp.annotation.alias_set_after != ""
for inp in self.func.schema_order_arguments()
)
return is_non_mutating_view or is_inplace_view or is_wildcard_view
@property
def view_schema_kind(self) -> ViewSchemaKind:
if self.is_view_op and self.func.name.name.inplace:
assert "inplace_view" in self.tags
return ViewSchemaKind.aliasing_inplace
if self.is_view_op:
return ViewSchemaKind.aliasing
else:
return ViewSchemaKind.non_aliasing
@property
def root_name(self) -> str:
return self.func.name.name.base
SchemaKind = Enum("SchemaKind", ("functional", "inplace", "out", "mutable", "scratch"))
# A structured kernel is guaranteed to have a functional and out variant, and
# optionally an inplace variant.
#
# NB: we create NativeFunctionsGroup *even if* the function is not
# actually annotated structured. Test the structured boolean to see if it
# actually is structured or not.
@dataclass(frozen=True)
class NativeFunctionsGroup:
functional: NativeFunction
inplace: Optional[NativeFunction]
mutable: Optional[NativeFunction]
out: NativeFunction
@property
def structured(self) -> bool:
# Whether or not the operator has a meta() function. This information is backend-agnostic.
return self.out.structured
def __post_init__(self) -> None:
test_sig: FunctionSchema = self.functional.func.signature()
for f in self.functions():
if test_sig != f.func.signature():
raise AssertionError(
"NativeFunctionsGroup constructed from two NativeFunctions "
f"that don't have matching signatures: {test_sig} != {f.func.signature()}"
)
assert self.functional.func.kind() == SchemaKind.functional
assert self.out.func.kind() == SchemaKind.out
assert self.functional.namespace == self.out.namespace
if self.inplace is not None:
assert self.inplace.func.kind() == SchemaKind.inplace
assert self.inplace.namespace == self.functional.namespace
if self.mutable is not None:
assert self.mutable.func.kind() == SchemaKind.mutable
assert self.mutable.namespace == self.functional.namespace
# See Note [Overload Ambiguity With Functional Variants]
assert self.functional.func.name.name.functional_overload
if self.structured:
# For now, structured composite kernels are not supported (need some
# design work to figure out how to make the composite case work)
assert not self.out.has_composite_implicit_autograd_kernel
assert self.functional.structured_delegate == self.out.func.name, (
f"{self.functional.func.name} delegates to {self.functional.structured_delegate} "
f"but its actual delegate is {self.out.func.name}"
)
if self.inplace is not None:
assert self.inplace.structured_delegate == self.out.func.name
generated_fns = sorted(
[str(f.func.name) for f in self.functions() if "generated" in f.tags]
)
generated_fns_str = ", ".join(str(x) for x in generated_fns)
expected_generated_fns: Set[str] = set()
for f in self.functions():
expected_generated_fns.update(str(op) for op in f.autogen)
expected_generated_fns_str = ", ".join(
str(x) for x in sorted(list(expected_generated_fns))
)
if len(expected_generated_fns) == 0 and len(generated_fns) > 0:
raise RuntimeError(
f"The codegen expects to be able to generate '{generated_fns_str}'."
" In order to generate them however, we expect them to be called out explicitly in the yaml."
f" Please add an 'autogen: {generated_fns_str}' line to the entry for {str(f.func.name)}"
)
if expected_generated_fns_str != generated_fns_str:
raise RuntimeError(
f"The codegen expects to be able to generate '{generated_fns_str}'."
f" To do so, it expects a line: 'autogen: {generated_fns_str}'."
f" Instead, it found 'autogen: {expected_generated_fns_str}'"
)
def signature(self) -> "FunctionSchema":
return self.out.func.signature()
def functions(self) -> Iterator[NativeFunction]:
yield self.functional
yield self.out
if self.inplace is not None:
yield self.inplace
if self.mutable is not None:
yield self.mutable
@property
def root_name(self) -> str:
return self.functional.root_name
@staticmethod
def from_dict(
d: Dict[SchemaKind, NativeFunction]
) -> Optional["NativeFunctionsGroup"]:
assert d
if len(d) == 1:
return None
d = dict(d) # non-destructive updates please
functional = d.pop(SchemaKind.functional, None)
inplace = d.pop(SchemaKind.inplace, None)
mutable = d.pop(SchemaKind.mutable, None)
out = d.pop(SchemaKind.out, None)
assert not d
assert functional is not None
# There are a few operators which only have functional/inplace variants;
# these don't count as structured for our purposes here
if out is None:
return None
# assuming all variants have the same namespace
return NativeFunctionsGroup(
functional=functional,
inplace=inplace,
mutable=mutable,
out=out,
)
@dataclass(frozen=True)
class BackendMetadata:
# The name of the backend kernel, for a given operator
    # for in-tree backends. These names come directly from the 'dispatch' field
# in native_functions.yaml. The dispatch entry is optional; in that
# case, that is equivalent to having written:
#
# dispatch:
# CompositeImplicitAutograd: $operator_name
kernel: str
# Whether or not the operator has a structured kernel implemented, for this particular backend.
    # For in-tree backends, they all have the same value for structured; this is listed
    # in native_functions.yaml.
    # However, external backends like XLA can independently toggle which ops are structured.
structured: bool
# The namespace for kernels, default value: DEFAULT_KERNEL_NAMESPACE
cpp_namespace: str
@dataclass(frozen=True)
class UfuncInnerLoop:
name: str
supported_dtypes: OrderedSet[ScalarType]
# key is stored here because it affects the semantics of name,
# so its helpful to have them together for further processing
ufunc_key: UfuncKey
@staticmethod
def parse(value: str, ufunc_key: UfuncKey) -> "UfuncInnerLoop":
name, supported_dtypes_str = value.split(" ", 1)
assert supported_dtypes_str[0] == "("
assert supported_dtypes_str[-1] == ")"
supported_dtypes: OrderedSet[ScalarType] = OrderedSet()
for k in supported_dtypes_str[1:-1].split(", "):
supported_dtypes |= ScalarType.parse_set(k)
return UfuncInnerLoop(
name=name, supported_dtypes=supported_dtypes, ufunc_key=ufunc_key
)
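# Illustrative example (editor's addition, using a hypothetical kernel name): a
# ufunc_inner_loop value such as "my_kernel (Floating, Half)" parsed with
# UfuncKey.Generic yields name="my_kernel" and supported_dtypes containing
# ScalarType.Float, ScalarType.Double and ScalarType.Half.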
# BackendIndex represents a backend.
# The BackendIndex encodes per-operator information that is potentially different
# for each backend. The most obvious example is the name of the kernel
# (the 'dispatch' entry in native_functions.yaml).
# However, there can be other examples of different backends having different information.
# External backends can choose to opt their kernels to be structured independently from in-tree backends,
# which means that this information isn't inherently tied to a NativeFunction; it's different per backend.
@dataclass(frozen=True)
class BackendIndex:
dispatch_key: DispatchKey
# Mainly important for structured kernels, this determines which variant in the operator group is used to implement the others.
# All in-tree ops use out kernels, while XLA uses functional kernels.
use_out_as_primary: bool
# Whether the backend requires a device guard, and device checks.
# For in-tree backends, this is currently just CUDA/HIP
# For out-of-tree backends, this is currently just Intel XPU
device_guard: bool
# Whether the backend is in-tree (CPU/CUDA) or out-of-tree (XLA)
external: bool
# Other backend-specific information that is on a per-operator basis
index: Dict["OperatorName", BackendMetadata]
@staticmethod
def grow_index(
parent_index: Dict[DispatchKey, Dict["OperatorName", BackendMetadata]],
child_index: Dict[DispatchKey, Dict["OperatorName", BackendMetadata]],
) -> None:
for k, v in child_index.items():
for op_name, metadata in v.items():
assert (
op_name not in parent_index[k]
), f"duplicate operator {op_name} for dispatch key {k}"
parent_index[k][op_name] = metadata
def primary(self, g: NativeFunctionsGroup) -> NativeFunction:
if self.use_out_as_primary:
return g.out
else:
return g.functional
def has_kernel(self, g: Union[NativeFunction, NativeFunctionsGroup]) -> bool:
m = self.get_kernel(g)
return m is not None
def get_kernel(
self, g: Union[NativeFunction, NativeFunctionsGroup]
) -> Optional[BackendMetadata]:
if isinstance(g, NativeFunction):
f = g
elif isinstance(g, NativeFunctionsGroup):
f = self.primary(g)
else:
assert_never(g)
if f.func.name not in self.index:
return None
return self.index[f.func.name]
def native_function_class_name(self) -> Optional[str]:
if self.external:
return f"{str(self.dispatch_key)}NativeFunctions"
else:
            # TODO: This discrepancy isn't required; we could also generate
# a class for in-tree kernels. It'll just require carefully
# updating every kernel definition + callsite of every in-tree aten kernel.
return None
# The function schema is undoubtedly the most important data structure
# in all of the codegen, as it defines the type signature for operators,
# and most of the code generation we do is type directed (e.g., look at
# the types, decide what to do. Think about how we code generate
# C++ function stubs!)
#
# We will also see in this class the general structure for how we model
# data in this code generation. A few notable properties to point out
# ahead of time:
#
# - These dataclasses are a *lossless* representation of the strings
# they are parsed from. In fact, we assert that given the
# information stored in the dataclass, we can exactly reconstruct
# the string we parsed from (and assert this inside the parse
# definition). There are a few reasons for this:
#
# - If you find that it is difficult to reconstruct the string
#     given a dataclass, that is a clue that your data
#     representation is wrong.
#
# - It helps ensure that all relevant information is present
# in the dataclass, so that downstream users aren't tempted
# to reparse the original string to get some information
# that was omitted.
#
# - It forces you to represent the data in-memory in the same way
# it is recorded textually, which makes the dataclasses easier
# to understand for someone who is familiar with the
# textual format. (As a tradeoff, it means you have to model
# the syntax, even when it is inconvenient. But maybe that means
# the syntax is bad!) If you don't understand the internal
# representation, go look at the printing code to see how
# it maps onto the surface syntax!
#
# - It makes it easy to test the parsing code, as parsing code
# that is inconsistent with the string code will fail early
# and loudly. (As a tradeoff, it makes the parsing code a bit
#     brittle; in particular, with trivial whitespace changes you
#     are likely to trigger an assert error.)
#
# In general, try to make the __str__ code as simple as possible
# (even at the cost of more complex parsing logic.) Additionally,
# try to minimize redundancy in data representation. (Precomputed
# fields are OK though: they are defined as a simple function on
# the canonical representation in question.)
#
# - These dataclasses are all frozen; once constructed their
# values never change. This makes it easy to tell where any
# given data came from: just look to the constructor. As a
# tradeoff, you can't easily "decorate" a schema with extra
# information from a post-facto analysis. We impose this
# restriction to make these structures more understandable.
#
@dataclass(frozen=True)
class FunctionSchema:
# The name of the operator this function schema describes.
name: "OperatorName"
arguments: "Arguments"
# TODO: Need to handle collisions with argument names at some point
returns: Tuple["Return", ...]
def schema_order_arguments(self) -> Iterator["Argument"]:
return itertools.chain(
self.arguments.flat_positional,
self.arguments.flat_kwarg_only,
self.arguments.out,
)
decl_re = re.compile(r"(?P<name>[^\(]+)\((?P<args>.*)\) -> (?P<returns>.*)")
def symints_to_ints(self) -> "FunctionSchema":
return dataclasses.replace(self, arguments=self.arguments.symints_to_ints())
@staticmethod
def parse(func: str) -> "FunctionSchema":
# We should probably get a proper parser here
decls = FunctionSchema.decl_re.findall(func)
assert len(decls) == 1, f"Invalid function schema: {func}"
ops, args, return_decl = decls[0]
name = OperatorName.parse(ops)
arguments = Arguments.parse(args)
returns = parse_returns(return_decl)
r = FunctionSchema(name=name, arguments=arguments, returns=returns)
assert str(r) == func, f"{str(r)} != {func}"
return r
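    # Illustrative example (editor's addition, using a hypothetical operator):
    #   FunctionSchema.parse(
    #       "my_op.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
    #   )
    # yields name "my_op.out", a keyword-only out argument "Tensor(a!) out", and
    # a single aliased return; str() of the result round-trips to the input
    # string (asserted above).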
def returns_are_aliased(self) -> bool:
# We assert earlier that schemas can't have a mix of aliased and non-aliased returns
return any(
r
for r in self.returns
if r.annotation is not None and r.annotation.is_write
)
def __post_init__(self) -> None:
for arg, ret in zip(self.arguments.out, self.returns):
assert arg.annotation == ret.annotation, (
"Out arguments must have matching return Tensor; furthermore, "
"the ith-argument needs to correspond to the ith return"
)
# We also enforce that if you have any mutable, positional args, then they are not returned.
# This makes it easier to group these functions properly with their functional/out= counterparts.
for a in self.arguments.post_self_positional_mutable:
assert not any(
a.annotation == r.annotation for r in self.returns
), f"If you have a schema with mutable positional args, we expect them to not be returned. schema: {str(self)}"
# Invariant: we expect out arguments to appear as keyword arguments in the schema.
# This means that all mutable returns should be aliased to a keyword argument
# (except for "self", which we explicitly don't treat as an out argument because of its use in methods)
# See Note [is_out_fn]
out_and_self = list(self.arguments.out) + [
arg for arg in self.arguments.flat_positional if arg.name == "self"
]
mutable_returns = [
ret
for ret in self.returns
if ret.annotation is not None and ret.annotation.is_write
]
immutable_returns = [
ret
for ret in self.returns
if ret.annotation is None or not ret.annotation.is_write
]
# Some assertions: We don't want any functions with a return type of "-> (Tensor(a!), Tensor)",
# because:
# (1) It's more annoying to handle properly
# (2) It's unnecessary - you can't method-chain on the first (mutated) output because it's part of a tuple.
# Instead, we expect the (a!) argument to not be returned.
assert (
len(mutable_returns) == 0 or len(immutable_returns) == 0
), f"NativeFunctions must have either only mutable returns, or only immutable returns. Found: {str(self)}"
for ret in mutable_returns:
assert any([ret.annotation == arg.annotation for arg in out_and_self]), (
'All mutable returns must be aliased either to a keyword argument, or to "self". '
"Did you forget to mark an out argument as keyword-only?"
)
if self.arguments.out:
# out= ops that return their mutable inputs are only really useful for method chaining.
# And method chaining is only really useful if the thing you're returning is a plain Tensor.
# So ideally, we'd enforce that out= ops with a single plain mutable tensor should return the tensor,
# and all other types of out= op schemas should return void.
# There are a bunch of existing out= ops that return tuples of tensors though, so we're stuck with allowing that.
if any(a.type != BaseType(BaseTy.Tensor) for a in self.arguments.out):
                assert (
                    len(self.returns) == 0
                ), (
                    "out= ops that accept tensor lists as out arguments "
                    "are expected to have no return type (since you can't do method chaining on them)"
                )
else:
                # mutable keyword arguments whose names have the _scratch_ prefix are
# scratch tensors for memory planning and should not be returned
assert len(
[
arg
for arg in self.arguments.out
if not arg.name.startswith("_scratch_")
]
) == len(
self.returns
), "Must return as many arguments as there are out arguments, or no return at all"
if self.name.name.inplace:
self_a = self.arguments.self_arg
assert (
self_a
and self_a.argument.annotation
and self_a.argument.annotation.is_write
)
if self_a.argument.type == BaseType(BaseTy.Tensor):
# All inplace ops with an ordinary `Tensor self` argument should return self,
# to allow for method chaining.
assert (
len(self.returns) == 1
and self.returns[0].annotation == self_a.argument.annotation
)
else:
# You can't method chain on non-tensor self arguments though (like a List[Tensor])
# so in all other cases we expect the return type to be none.
assert len(self.returns) == 0
if self.arguments.tensor_options is not None:
            assert self.kind() == SchemaKind.functional, (
                "Found an operator that is not functional or an out variant, but has tensor options arguments. "
                "This is not allowed: tensor options arguments are only allowed for factory functions. "
f"schema: {str(self)}"
)
if self.is_functional_fn():
assert self.kind() == SchemaKind.functional, (
"Found an operator that is not functional, but its overload contains the string 'functional'."
"This is a special keyword in the codegen, please use a different overload name."
f"schema: {str(self)}"
)
def is_functional_fn(self) -> bool:
return "functional" in self.name.overload_name
def is_symint_fn(self) -> bool:
# TODO: make this more robust
return "SymInt" in self.name.overload_name
def is_out_fn(self) -> bool:
# Note [is_out_fn]
#
# out functions are the variants which take an explicit out= argument
# to populate into. We need to know if a schema corresponds to an
# out function for several reasons:
#
# - They codegen differently in C++ API
# - codegen to at::add_out rather than at::add
# - out argument is moved to front of C++ argument list
#
# out functions are DEFINED to be any function with a keyword-only
# argument that is mutable. In principle, this could lead to a
# false positive if you define a function that mutates a
# kwarg only argument, but this isn't the "true" output of this
# function. A more robust definition that would work in this
# case would also look at:
#
# - The output types. Out functions take in the arguments
# they mutate and then return them again; this is sort
# of "definitionally" what makes something an out function.
# Historically, we DO check this for consistency.
# - Correspondence with pure variant. An out function
# should have a signature equivalent to its pure variant,
# but just with extra kwargs for the output elements. This
# is difficult to actually check for and historically
# we only do this check in tools/
return bool(self.arguments.out)
def kind(self) -> SchemaKind:
"""
What kind of schema is this? A functional schema is one
that returns a newly allocated output; an inplace schema
modifies the self argument inplace; an out schema writes
the result into an explicitly provided out argument.
"""
is_out = bool(self.arguments.out)
is_scratch = bool(
[arg for arg in self.arguments.out if arg.name.startswith("_scratch_")]
)
is_inplace = self.name.name.inplace
is_mutable = any(
a.annotation is not None and a.annotation.is_write
for a in self.arguments.post_self_positional
)
assert not (is_out and is_inplace)
# out= and inplace schemas can also have post_self_positional mutable args,
# but we give precedence to out= and inplace when deciding the schema kind.
# Tradeoff: we probably don't want to have to teach codegen that looks at inplace ops
# to also worry about mutable post_self_positional arguments,
        # but it seems like a much bigger lift to classify them as having a new schema kind.
# The number of ops that fit in this strange category is small enough that
# we can probably manually write code for them instead of forcing the codegen to handle them.
if is_inplace:
return SchemaKind.inplace
elif is_scratch:
assert (
is_out
), "invariant: all scratch operators are expected to be out= operators too"
return SchemaKind.scratch
elif is_out:
            assert (
                not is_scratch
            ), "We should not categorize a scratch op as an out variant. Check that the order of the if statements is as expected!"
return SchemaKind.out
elif is_mutable:
return SchemaKind.mutable
else:
return SchemaKind.functional
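    # Illustrative examples (editor's addition, hypothetical schemas):
    #   "my_op(Tensor self) -> Tensor"                              -> SchemaKind.functional
    #   "my_op_(Tensor(a!) self) -> Tensor(a!)"                     -> SchemaKind.inplace
    #   "my_op.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"   -> SchemaKind.out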
# For every return:
# - If the return aliases an input, we return the input name
# - Otherwise, we return None.
# If return names were enforced to be consistent with aliasing information, then we wouldn't need this.
def aliased_return_names(self) -> List[Optional[str]]:
outs: List[Optional[str]] = []
for r in self.returns:
aliased_args = [
a
for a in self.arguments.flat_all
if a.annotation is not None and a.annotation == r.annotation
]
if len(aliased_args) == 0:
outs.append(None)
elif len(aliased_args) == 1:
outs.append(aliased_args[0].name)
else:
aliased_names = ", ".join(a.name for a in aliased_args)
                raise AssertionError(
                    f"Found a return ({r.name}) that aliases multiple inputs ({aliased_names})"
)
return outs
def signature(
self,
*,
strip_default: bool = False,
strip_view_copy_name: bool = False,
keep_return_names: bool = False,
) -> "FunctionSchema":
"""
Certain schemas are 'related', in that they are simply
inplace/out/functional versions of the same function. This method
factors these schemas into the "core" functional signature which
is equal across all versions.
Here is what normalization happens to the schema to convert
it to a signature:
- The overload name is stripped (name is retained, since
it expresses semantic content about what the function does)
- Inplace is set False
- Out arguments are stripped
- Mutable post_self_positional args are converted to returns
- Mutability annotations are stripped (this is sound
because you cannot overload on mutability annotation)
- Return names are stripped since they are not overloadable and
some variants have return names but some not
- TensorOptions are dropped
because out= variants of factory functions don't include them
(and we want to be able to pair up factory functions with their out variants)
Finally, we want to be able to pair up related "view" and their
corresponding "view_copy" operators. We do this by optionally
stripping the trailing "_copy" from the base name.
Example of a mutable op before and after:
f.func (Mutable operator):
_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) # noqa: B950
f.func (Corresponding functional operator):
_fused_moving_avg_obs_fq_helper.functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out) # noqa: B950
f.func.signature() output:
_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) # noqa: B950
"""
def strip_ret_annotation(r: Return) -> Return:
return Return(
name=r.name if keep_return_names else None,
type=r.type,
annotation=None,
)
base_name = self.name.name.base
if strip_view_copy_name and base_name.endswith("_copy"):
base_name = base_name.replace("_copy", "")
# find mutable inputs that are not originally returned, and convert them to returns
returns_from_mutable_inputs = tuple(
# When we're grouping functions we strip the return names,
# but when we're generating the actual functional variants then we follow
# a convention for what to name the returns
Return(
name=f"{a.name}_out" if keep_return_names else None,
type=a.type,
annotation=None,
)
for a in itertools.chain(
# Order is important here (otherwise e.g. inplace with mutable args
# and out= with mutable args won't have the same signature)
[self.arguments.self_arg.argument]
if self.arguments.self_arg is not None
else [],
self.arguments.out,
self.arguments.post_self_positional,
)
if a.annotation is not None
and a.annotation.is_write
and not any(a.annotation == r.annotation for r in self.returns)
)
original_returns = tuple(map(strip_ret_annotation, self.returns))
# Ordering is important here. We expect the "mutable input" returns to come last.
returns = original_returns + returns_from_mutable_inputs
args_sig = self.arguments.signature(strip_default=strip_default)
# See Note [bernoulli.p schema]
if str(self.name) == "bernoulli.p":
args_sig = Arguments.parse(str(args_sig).replace("float p", "float p=0.5"))
return FunctionSchema(
name=OperatorName(
name=BaseOperatorName(
base=base_name,
inplace=False,
dunder_method=self.name.name.dunder_method,
),
overload_name="", # stripped
),
arguments=args_sig,
returns=returns,
)
def view_signature(self) -> "FunctionSchema":
return self.signature(strip_view_copy_name=True)
def with_name(self, name: "OperatorName") -> "FunctionSchema":
return FunctionSchema(
name=name,
arguments=self.arguments,
returns=self.returns,
)
@property
def modifies_arguments(self) -> bool:
return self.kind() in [SchemaKind.inplace, SchemaKind.out, SchemaKind.mutable]
def __str__(self) -> str:
all_arguments_str = str(self.arguments)
if len(self.returns) == 1:
returns = str(self.returns[0]) # omit parentheses
else:
returns = "(" + ", ".join(map(str, self.returns)) + ")"
return f"{self.name}({all_arguments_str}) -> {returns}"
# Here is the rest of the data model, described more briefly.
# Simplified version for what actually shows up in built-ins.
# Look at alias_info.h for expanded syntax. If you need the structure,
# you also need to make this structure recursive so it can be lined
# up with the type components too. For primitives this isn't really
# necessary
@dataclass(frozen=True)
class Annotation:
# Typically only has one element. Not actually a set so
# we can conveniently assume it is canonically ordered
alias_set: Tuple[str, ...]
is_write: bool
alias_set_after: str
@staticmethod
def parse(ann: str) -> "Annotation":
# Only handling afterSet == Wildcard for now
becomes_wildcard_index = ann.find(" -> *")
if becomes_wildcard_index != -1:
after_set = "*"
            # TODO: I'm not good enough with regexes to ignore -> *
m = re.match(
r"^([a-z])(!?)(!?)$",
ann[:becomes_wildcard_index]
+ ann[becomes_wildcard_index + len(" -> *") :],
)
else:
after_set = ""
m = re.match(r"^([a-z])(!?)(!?)$", ann)
assert m is not None, f"unrecognized alias annotation {ann}"
alias_set = (m.group(1),)
is_write = m.group(2) == "!"
r = Annotation(
alias_set=alias_set, is_write=is_write, alias_set_after=after_set
)
assert str(r) == ann, f"{r} != {ann}"
return r
def __str__(self) -> str:
alias_set = "|".join(self.alias_set)
if self.alias_set_after:
alias_set = f'{alias_set}{" -> "}{self.alias_set_after}'
is_write = "!" if self.is_write else ""
return f"{alias_set}{is_write}"
# The base class for the type system. This is also loosely modeled
# off of jit_type.h, but we've simplified the hierarchy to focus
# in on the aspects of the type system that matter for code generation
# (for example, there's no SingleElementType subclass anymore).
# You never actually construct a Type; usually it's going to be one
# of the subclasses. If Python had ADTs this would be one!
@dataclass(frozen=True)
class Type:
@staticmethod
def parse(t: str) -> "Type":
r = Type._parse(t)
assert str(r) == t, f"{r} != {t}"
return r
@staticmethod
def _parse(t: str) -> "Type":
m = re.match(r"^(.+)\?$", t)
if m is not None:
return OptionalType(Type.parse(m.group(1)))
m = re.match(r"^(.+)\[([0-9]+)?\]$", t)
if m is not None:
size = int(m.group(2)) if m.group(2) is not None else None
return ListType(elem=Type.parse(m.group(1)), size=size)
# '__torch__.torch.classes.' is the prefix for custom class
m = re.match(r"^__torch__\.torch\.classes\.([a-zA-Z0-9_.]+)$", t)
if m is not None:
return CustomClassType(m.group(1))
try:
return BaseType(BaseTy[t])
except KeyError:
raise RuntimeError(f"unrecognized type {t}")
def __str__(self) -> str:
raise NotImplementedError
# WARNING: These concepts are not very well-defined. For example,
# is "int?" nullable? How about "int?[]". They are defined
# so we can conveniently generate legacy Declarations.yaml but
# really we should probably just remove these at some point
def is_tensor_like(self) -> bool:
raise NotImplementedError
def is_nullable(self) -> bool:
raise NotImplementedError
def is_list_like(self) -> Optional["ListType"]:
raise NotImplementedError
def symint_to_int(self) -> "Type":
raise NotImplementedError
# Base types are simple, atomic types with no further structure
BaseTy = Enum(
"BaseTy",
(
"Generator",
"ScalarType",
"Tensor",
"int",
"Dimname",
"DimVector",
"float",
"str",
"bool",
"Layout",
"Device",
"Scalar",
"MemoryFormat",
"QScheme",
"Storage",
"Stream",
"SymInt",
"ConstQuantizerPtr", # TODO: rename
),
)
@dataclass(frozen=True)
class BaseType(Type):
name: BaseTy
def __str__(self) -> str:
return f"{self.name.name}"
def is_tensor_like(self) -> bool:
return self.name == BaseTy.Tensor
def is_nullable(self) -> bool:
return False
def symint_to_int(self) -> "BaseType":
if self.name == BaseTy.SymInt:
return BaseType(BaseTy.int)
return self
def is_list_like(self) -> Optional["ListType"]:
return None
# Optional types may be specified, or may also be validly given None
@dataclass(frozen=True)
class OptionalType(Type):
elem: Type
def __str__(self) -> str:
return f"{self.elem}?"
def is_tensor_like(self) -> bool:
return self.elem.is_tensor_like()
def is_nullable(self) -> bool:
return True
def symint_to_int(self) -> "Type":
return dataclasses.replace(self, elem=self.elem.symint_to_int())
def is_list_like(self) -> Optional["ListType"]:
return self.elem.is_list_like()
# A type representing a PyTorch custom class
@dataclass(frozen=True)
class CustomClassType(Type):
class_name: str
def __str__(self) -> str:
"""
        Return the class name, prefixed with __torch__.torch.classes.
"""
return f"__torch__.torch.classes.{self.class_name}"
def is_tensor_like(self) -> bool:
"""
Assume a custom class is not a tensor.
"""
return False
def is_nullable(self) -> bool:
"""
Assume a custom class is not nullable.
"""
return False
def symint_to_int(self) -> "Type":
return self
def is_list_like(self) -> Optional["ListType"]:
return None
# List types specify that we may have multiples of an element. We
# also support explicit sizes on list types, but these have
# some nontrivial semantics! (However, for C++ API purposes, explicit
# sizes are mostly erased from the type system.)
#
# DANGER WILL ROBINSON: C++ elaboration depends on elem type; e.g.,
# int[] elaborates differently than bool[3]!
@dataclass(frozen=True)
class ListType(Type):
elem: Type
size: Optional[int]
def __str__(self) -> str:
size = f"{self.size}" if self.size else ""
return f"{self.elem}[{size}]"
def is_tensor_like(self) -> bool:
return self.elem.is_tensor_like()
def is_nullable(self) -> bool:
return self.elem.is_nullable()
def symint_to_int(self) -> "ListType":
return ListType(self.elem.symint_to_int(), self.size)
def is_list_like(self) -> Optional["ListType"]:
return self
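# Illustrative sketch (assumes this module is importable as torchgen.model):
# Type.parse builds the small type hierarchy above from schema type strings.
#
#     >>> from torchgen.model import Type
#     >>> str(Type.parse("Tensor?"))
#     'Tensor?'
#     >>> t = Type.parse("int[2]")
#     >>> (str(t.elem), t.size)
#     ('int', 2)
#     >>> Type.parse("Tensor[]").is_tensor_like()
#     True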
@dataclass(frozen=True)
class Argument:
# NB: I didn't put kwarg_only as a boolean field here, unlike
# c10::Argument, so that printing works correctly
name: str
type: Type
default: Optional[str]
# The semantics of the annotation field are a little strange.
#
# Alias annotations parametrize Tensors (since Tensors are the only things
# that can alias.) This motivates why I write Tensor(a!)? (and not, for
# example, Tensor?(a!)), because the (a!) describes aliasing on the tensor,
# which may be optional (i.e., the alias annotation should bind first to
# Tensor, before the optional postfix annotation).
#
# However, despite being a property of Tensor, we (and c10::Argument)
# store the annotation at the top level of the Argument, rather than
# inside the embedded Tensor type. In the C++ version of this
# class, we then go through great lengths to mimic the type
# structure in the annotation structure so we can correlate
# annotations with types.
#
# Now, it turns out, in all applications in code generation, the
# structure of annotated types is very simple. So we just hard
# code it here. But if we ever do get anything more complex, this
# model will have to change!
annotation: Optional[Annotation]
@staticmethod
def parse(arg: str) -> "Argument":
name: str
default: Optional[str]
type_and_annot, name_and_default = arg.rsplit(" ", 1)
if "=" in name_and_default:
name, default = name_and_default.split("=")
else:
name = name_and_default
default = None
# TODO: deduplicate annotation matching with Return
match = re.match(r"Tensor\((.+)\)(.*)", type_and_annot)
annotation: Optional[Annotation]
if match:
# If you update this, make sure the __str__ still works too
assert match.group(2) in [
"",
"?",
"[]",
], "unrecognized alias analysis form with Tensor"
type_s = "Tensor" + match.group(2)
annotation = Annotation.parse(match.group(1))
else:
type_s = type_and_annot
annotation = None
type = Type.parse(type_s)
r = Argument(
name=name,
type=type,
default=default,
annotation=annotation,
)
assert str(r) == arg, f"{str(r)} != {arg}"
return r
@property
def is_write(self) -> bool:
return self.annotation is not None and self.annotation.is_write
def symint_to_int(self) -> "Argument":
return dataclasses.replace(self, type=self.type.symint_to_int())
def __str__(self) -> str:
type = f"{self.type}"
if self.annotation:
assert type in ["Tensor", "Tensor?", "Tensor[]"]
type = type.replace("Tensor", f"Tensor({self.annotation})")
if self.name is None:
return type
else:
mb_default = ""
if self.default:
mb_default = f"={self.default}"
return f"{type} {self.name}{mb_default}"
@dataclass(frozen=True)
class Return:
name: Optional[str]
type: Type
annotation: Optional[Annotation]
@staticmethod
def parse(arg: str) -> "Return":
name: Optional[str]
if " " in arg:
type_and_annot, name = arg.rsplit(" ", 1)
else:
type_and_annot = arg
name = None
match = re.match(r"Tensor\((.+)\)(.*)", type_and_annot)
annotation: Optional[Annotation]
if match:
# If you update this, make sure the __str__ still works too
assert match.group(2) in [
"",
"?",
"[]",
], "unrecognized alias analysis form with Tensor"
type_s = "Tensor" + match.group(2)
annotation = Annotation.parse(match.group(1))
else:
type_s = type_and_annot
annotation = None
type = Type.parse(type_s)
r = Return(
name=name,
type=type,
annotation=annotation,
)
assert str(r) == arg, f"{str(r)} != {arg}"
return r
@property
def is_write(self) -> bool:
return self.annotation is not None and self.annotation.is_write
def __str__(self) -> str:
type = f"{self.type}"
if self.annotation:
assert type in ["Tensor", "Tensor?", "Tensor[]"]
type = type.replace("Tensor", f"Tensor({self.annotation})")
if self.name is None:
return type
else:
return f"{type} {self.name}"
# Represents the self argument for functions that may be methods
@dataclass(frozen=True)
class SelfArgument:
argument: Argument
# Bundle of arguments that represent a TensorOptions. This is mostly
# relevant for the public C++ API but we bake it into the core data
# model because other APIs often have to interact with it
@dataclass(frozen=True)
class TensorOptionsArguments:
dtype: Argument
layout: Argument
device: Argument
pin_memory: Argument
def all(self) -> Sequence[Argument]:
return [self.dtype, self.layout, self.device, self.pin_memory]
@dataclass(frozen=True)
class Arguments:
# pre_self_positional is usually empty, but is notably non-empty
# for where.self, where the condition argument comes before the
# self argument
pre_self_positional: Tuple[Argument, ...]
self_arg: Optional[SelfArgument]
post_self_positional: Tuple[Argument, ...]
pre_tensor_options_kwarg_only: Tuple[Argument, ...]
tensor_options: Optional[TensorOptionsArguments]
# post_tensor_options is typically memory format, which should be
# part of tensor options but isn't right now, and is usually
# placed after the tensor options arguments
post_tensor_options_kwarg_only: Tuple[Argument, ...]
# Unlike in the previous codegen, we have factored out 'out' arguments
# in the canonical representation, removing them from kwarg
# arguments. This choice is justified by numerous downstream
# transformations which treat out arguments specially; additionally,
# you can see that canonicity is not violated!
out: Tuple[Argument, ...] # these are also kwarg-only
@property
def flat_non_out(self) -> Sequence[Argument]:
ret: List[Argument] = []
ret.extend(self.flat_positional)
ret.extend(self.flat_kwarg_only)
return ret
@property
def flat_positional(self) -> Sequence[Argument]:
ret: List[Argument] = []
ret.extend(self.pre_self_positional)
if self.self_arg is not None:
ret.append(self.self_arg.argument)
ret.extend(self.post_self_positional)
return ret
@property
def post_self_positional_mutable(self) -> Sequence[Argument]:
return [a for a in self.post_self_positional if a.is_write]
# NB: doesn't contain out arguments
@property
def flat_kwarg_only(self) -> Sequence[Argument]:
ret: List[Argument] = []
ret.extend(self.pre_tensor_options_kwarg_only)
if self.tensor_options is not None:
ret.extend(self.tensor_options.all())
ret.extend(self.post_tensor_options_kwarg_only)
return ret
@property
def flat_all(self) -> Sequence[Argument]:
ret: List[Argument] = []
ret.extend(self.flat_positional)
ret.extend(self.flat_kwarg_only)
ret.extend(self.out)
return ret
@property
def non_out(
self,
) -> Sequence[Union[Argument, SelfArgument, TensorOptionsArguments]]:
ret: List[Union[Argument, SelfArgument, TensorOptionsArguments]] = []
ret.extend(self.positional)
ret.extend(self.kwarg_only)
return ret
@property
def positional(self) -> Sequence[Union[Argument, SelfArgument]]:
ret: List[Union[Argument, SelfArgument]] = []
ret.extend(self.pre_self_positional)
if self.self_arg is not None:
ret.append(self.self_arg)
ret.extend(self.post_self_positional)
return ret
@property
def kwarg_only(self) -> Sequence[Union[Argument, TensorOptionsArguments]]:
ret: List[Union[Argument, TensorOptionsArguments]] = []
ret.extend(self.pre_tensor_options_kwarg_only)
if self.tensor_options is not None:
ret.append(self.tensor_options)
ret.extend(self.post_tensor_options_kwarg_only)
return ret
@property
def all(self) -> Sequence[Union[Argument, SelfArgument, TensorOptionsArguments]]:
ret: List[Union[Argument, SelfArgument, TensorOptionsArguments]] = []
ret.extend(self.positional)
ret.extend(self.kwarg_only)
ret.extend(self.out)
return ret
def mutable_arg_names(self) -> List[str]:
return [
a.name
for a in self.flat_all
if a.annotation is not None and a.annotation.is_write
]
def symints_to_ints(self) -> "Arguments":
arguments = self
if arguments.self_arg:
arguments = dataclasses.replace(
arguments,
pre_self_positional=tuple(
x.symint_to_int() for x in arguments.pre_self_positional
),
)
if self.tensor_options:
arguments = dataclasses.replace(
arguments,
post_tensor_options_kwarg_only=tuple(
x.symint_to_int() for x in arguments.post_tensor_options_kwarg_only
),
)
arguments = dataclasses.replace(
arguments,
post_self_positional=tuple(
x.symint_to_int() for x in arguments.post_self_positional
),
pre_tensor_options_kwarg_only=tuple(
x.symint_to_int() for x in arguments.pre_tensor_options_kwarg_only
),
)
return arguments
def has_tensor_arg(self) -> bool:
return any(a.type.is_tensor_like() for a in self.flat_non_out)
def signature(self, *, strip_default: bool = False) -> "Arguments":
# dataclasses.replace could be used here, but it is less
# type safe so for now I've opted to type everything out
def strip_arg_annotation(a: Argument) -> Argument:
return Argument(
name=a.name,
type=a.type,
default=a.default if not strip_default else None,
annotation=None,
)
return Arguments(
pre_self_positional=tuple(
map(strip_arg_annotation, self.pre_self_positional)
),
self_arg=SelfArgument(strip_arg_annotation(self.self_arg.argument))
if self.self_arg is not None
else None,
post_self_positional=tuple(
map(strip_arg_annotation, self.post_self_positional)
),
            # Since TensorOptions are dropped, the post_tensor_options_kwargs are
# converted to pre_tensor_options_kwargs
pre_tensor_options_kwarg_only=tuple(
map(strip_arg_annotation, self.pre_tensor_options_kwarg_only)
)
+ tuple(map(strip_arg_annotation, self.post_tensor_options_kwarg_only)),
# TensorOptions are dropped in signature,
# so we can pair factory functions with their out= variants.
tensor_options=None,
post_tensor_options_kwarg_only=tuple(),
# out arguments are dropped in signature
out=(),
)
def remove_self_annotation(self) -> "Arguments":
assert self.self_arg is not None
return dataclasses.replace(
self,
self_arg=SelfArgument(
dataclasses.replace(self.self_arg.argument, annotation=None)
),
)
def with_out_args(self, outs: List[Argument]) -> "Arguments":
assert len(self.out) == 0
return dataclasses.replace(
self,
out=tuple(outs),
)
@staticmethod
def _preparse(args: str) -> Tuple[List[Argument], List[Argument], List[Argument]]:
positional: List[Argument] = []
kwarg_only: List[Argument] = []
out: List[Argument] = []
arguments_acc = positional
# TODO: Use a real parser here; this will get bamboozled
# by signatures that contain things like std::array<bool, 2> (note the space)
for arg in args.split(", "):
if not arg:
continue
if arg == "*":
assert (
arguments_acc is positional
), "invalid syntax: kwarg-only specifier * can only occur once"
arguments_acc = kwarg_only
continue
parg = Argument.parse(arg)
# Currently, we rely directly on the invariant that there are NO
# kwarg-only mutating arguments. If you want to relax this,
# we will need a more semantic way of matching that takes
# into account return arguments. In that case, you will have
# to manage out computation a level up, in FunctionSchema. See Note
# [is_out_fn]
if parg.annotation is not None and parg.annotation.is_write:
if arguments_acc is positional:
pass # do nothing
elif arguments_acc is kwarg_only:
arguments_acc = out
else:
assert arguments_acc is not out
arguments_acc.append(parg)
return positional, kwarg_only, out
@staticmethod
def parse(args: str) -> "Arguments":
"""
Input: 'int x, int y, int z'
"""
# We do this in two phases. First we parse into three
# main categories: positional, kwarg_only, out.
# Then, we reparse positional and kwarg_only to separate
# out the self argument and tensor options arguments.
positional, kwarg_only, out = Arguments._preparse(args)
# Split self argument
self_ix = None
for i, a in enumerate(positional):
if a.name == "self":
self_ix = i
break
pre_self_positional: List[Argument]
self_arg: Optional[SelfArgument]
post_self_positional: List[Argument]
if self_ix is not None:
pre_self_positional = positional[:self_ix]
self_arg = SelfArgument(positional[self_ix])
post_self_positional = positional[self_ix + 1 :]
else:
pre_self_positional = []
self_arg = None
post_self_positional = positional
# Group tensor options arguments
pre_tensor_options_kwarg_only: List[Argument] = []
tensor_options: Optional[TensorOptionsArguments] = None
post_tensor_options_kwarg_only: List[Argument] = []
kwarg_only_acc = pre_tensor_options_kwarg_only
def pred(name: str, ty: Type) -> Callable[[Argument], bool]:
return lambda a: a.name == name and a.type in [ty, OptionalType(ty)]
predicates = [ # order matters
pred("dtype", Type.parse("ScalarType")),
pred("layout", Type.parse("Layout")),
pred("device", Type.parse("Device")),
pred("pin_memory", Type.parse("bool")),
]
i = 0
while i < len(kwarg_only):
# If there is enough space...
if i <= len(kwarg_only) - len(predicates):
# And the next len(predicates) arguments look like TensorOptions arguments
if all(
p(a)
for p, a in zip(predicates, kwarg_only[i : i + len(predicates)])
):
assert kwarg_only_acc is pre_tensor_options_kwarg_only
# Group them together as one argument
tensor_options = TensorOptionsArguments(
dtype=kwarg_only[i],
layout=kwarg_only[i + 1],
device=kwarg_only[i + 2],
pin_memory=kwarg_only[i + 3],
)
i += len(predicates)
kwarg_only_acc = post_tensor_options_kwarg_only
continue
kwarg_only_acc.append(kwarg_only[i])
i += 1
return Arguments(
pre_self_positional=tuple(pre_self_positional),
self_arg=self_arg,
post_self_positional=tuple(post_self_positional),
pre_tensor_options_kwarg_only=tuple(pre_tensor_options_kwarg_only),
tensor_options=tensor_options,
post_tensor_options_kwarg_only=tuple(post_tensor_options_kwarg_only),
out=tuple(out),
)
def __str__(self) -> str:
all_arguments: List[str] = []
all_arguments.extend(map(str, self.flat_positional))
if self.flat_kwarg_only or self.out:
all_arguments.append("*")
all_arguments.extend(map(str, self.flat_kwarg_only))
all_arguments.extend(map(str, self.out))
return ", ".join(all_arguments)
def __post_init__(self) -> None:
# TODO: These invariants are weirdly asymmetric?
# TODO: Fancier types?
if self.self_arg is None:
assert not self.pre_self_positional
if self.tensor_options is None:
assert not self.post_tensor_options_kwarg_only
# We don't allow any of the following to have argument annotations,
# to keep things simple.
mutable_pre_self_positionals = [
a
for a in self.pre_self_positional
if a.annotation is not None and a.annotation.is_write
]
assert (
len(mutable_pre_self_positionals) == 0
), "mutable pre_self_positional arguments are not currently supported in the schema"
# Names that validly are __iXXX__ indicating inplace operations.
# Taken from https://www.python.org/dev/peps/pep-0203/#new-methods
# NB: PyTorch hasn't actually implemented all of these
AUGMENTED_ASSIGNMENT_NAMES = [
"add",
"sub",
"mul",
"div",
"mod",
"pow",
"lshift",
"rshift",
"and",
"xor",
"or",
]
# A BaseOperatorName is what we think of the operator name, without
# the overload name. Unusually, we don't represent this as just a
# string; instead, we directly represent a few important semantic
# bits of information we derive from the string: namely whether
# or not it's inplace (add_) and whether or not it's a double-underscore
# method (__add__)
@dataclass(frozen=True)
class BaseOperatorName:
base: str
inplace: bool
dunder_method: bool
# Note [Overload Ambiguity With Functional Variants]
# A handful of operators have both a "mutable" and a "functional" variant.
# (native_batch_norm is a good example, although this isn't the case today).
# For those operators, the mutable and functional variant take in the same set of
# arguments, but have different alias annotations.
# this makes it ambiguous when you try to resolve an OverloadPacket into an overload,
# given a set of input arguments.
#
# So instead of making the "functional" variant in this case a real overload, e.g:
# native_batch_norm (mutable variant)
# native_batch_norm.functional (functional variant)
# we make it a new base operator,
# native_batch_norm_functional (functional variant)
#
# In an ideal world, we would probably invert this so the operators were:
# native_batch_norm.mutable (mutable variant)
# native_batch_norm (functional variant)
#
# Doing that is BC-breaking though, so we're stuck with the above modeling.
functional_overload: bool = False
@staticmethod
def parse(op: str) -> "BaseOperatorName":
assert op != ""
assert not op.endswith("_out"), (
"_out suffix is reserved and not permitted for operator names; "
"did you mean to specify an out overload name instead?"
)
m = re.match(r"^__([^_]+)__$", op)
if m is not None:
dunder_method = True
base = m.group(1)
if any(base == f"i{n}" for n in AUGMENTED_ASSIGNMENT_NAMES):
inplace = True
base = base[1:]
else:
inplace = False
# temporary, this is not intrinsically true but
# has been historically true for dunder methods
# we support (but, if we ever got, say, __int__, this would
# be wrong!)
assert base[0] != "i"
else:
dunder_method = False
base = op
if base[-1] == "_":
inplace = True
base = base[:-1]
else:
inplace = False
# See Note [Overload Ambiguity With Functional Variants]
functional_suffix = "_functional"
if base.endswith(functional_suffix):
functional_overload = True
base = base[: -len(functional_suffix)]
# This seems complicated and unnecessary, so banning dunder methods
# for now on ops that have a functional + mutable variant (like native_batch_norm).
assert not dunder_method and not inplace
else:
functional_overload = False
r = BaseOperatorName(
base=base,
inplace=inplace,
dunder_method=dunder_method,
functional_overload=functional_overload,
)
assert str(r) == op, f"{str(r)} != {op}"
return r
def __str__(self) -> str:
if self.dunder_method:
i = "i" if self.inplace else ""
return f"__{i}{self.base}__"
else:
i = (
"_"
if self.inplace
else "_functional"
if self.functional_overload
else ""
)
return f"{self.base}{i}"
# Operator name is the base operator name along with the (typically not
# user visible) overload string.
@dataclass(frozen=True)
class OperatorName:
name: BaseOperatorName
overload_name: str
@staticmethod
def parse(op_name: str) -> "OperatorName":
if "." in op_name:
name, overload_name = op_name.split(".", 1)
else:
name = op_name
overload_name = ""
r = OperatorName(name=BaseOperatorName.parse(name), overload_name=overload_name)
assert str(r) == op_name, f"{str(r)} != {op_name}"
return r
def __str__(self) -> str:
if self.overload_name:
return f"{self.name}.{self.overload_name}"
else:
return f"{self.name}"
# NB: This must be synchronized with the naming scheme in
# aten/src/ATen/templates/Operators.h
# Given a function schema "aten::op.overload(...)",
# If there is no overload name, this returns f"{op}"
# If there is an overload name, this returns f"{op}_{overload}"
def unambiguous_name(self) -> str:
if self.overload_name:
return f"{self.name}_{self.overload_name}"
else:
return f"{self.name}"
def remove_inplace(self) -> "OperatorName":
return OperatorName(
name=BaseOperatorName(
base=self.name.base,
inplace=False,
dunder_method=self.name.dunder_method,
),
overload_name=self.overload_name,
)
def with_overload(self, overload: str) -> "OperatorName":
return OperatorName(
name=BaseOperatorName(
base=self.name.base,
inplace=False,
dunder_method=self.name.dunder_method,
),
overload_name=overload,
)
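# Illustrative sketch (assumes this module is importable as torchgen.model):
# an overloaded operator name and its collision-free spelling.
#
#     >>> from torchgen.model import OperatorName
#     >>> op = OperatorName.parse("add.Tensor")
#     >>> (str(op), op.unambiguous_name())
#     ('add.Tensor', 'add_Tensor')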
def gets_generated_out_inplace_wrapper(
f: NativeFunction, g: NativeFunctionsGroup, b: BackendIndex
) -> bool:
return (
f.func.kind() is not SchemaKind.functional
and not b.has_kernel(f)
and b.has_kernel(g.functional)
)
# NativeFunction objects that are views (f.is_view_op returns True)
# are added into a `NativeFunctionsViewGroup`, which we can use to
# easily access the generated (optional) view_copy NativeFunction.
# It's convenient to group them together, so we pair them up in NativeFunctionsViewGroup.
# See Note [Codegen'd {view}_copy Operators]
#
# One property of this representation is that in order for a view-like op to be part of
# a NativeFunctionsViewGroup, the "aliasing" version of that view op must exist.
# There's one case where that doesn't happen: we have a non-aliasing `narrow_copy.out` op,
# but don't have a corresponding aliasing `narrow.out` op.
# This means that `narrow_copy.out` won't appear as a NativeFunctionsViewGroup.
@dataclass(frozen=True)
class NativeFunctionsViewGroup:
view: NativeFunction
# Note: the {view}_copy operator is optional because we currently don't generate copy variants
# for all view ops. Notably, we don't generate them for CompositeImplicitAutograd views
# (we already get them "for free" through decomposition)
view_copy: Optional[NativeFunction]
    # view_inplace ops are also optional, but every view_inplace op should have an out-of-place variant.
view_inplace: Optional[NativeFunction]
def __post_init__(self) -> None:
assert self.view.is_view_op
if self.view_copy is None:
assert not gets_generated_view_copy(self.view), (
f"{str(self.view.func.name)} appears to be a new operator that aliases its inputs."
" The codegen expects you to add a corresponding operator to native_functions.yaml:"
f" {get_view_copy_name(self.view)!s}."
" See Note [view_copy NativeFunctions] for details."
)
else:
assert self.view_copy.func.name.name.base.endswith("_copy")
assert self.view.func.signature() == self.view_copy.func.signature(
strip_view_copy_name=True
)
assert "view_copy" in self.view_copy.tags, (
f"{str(self.view_copy.func.name), str(self.view.tags)} appears to be a view_copy operator. The codegen expects"
" view_copy operators to be annotated with the 'view_copy' tag in native_functions.yaml."
" See Note [view_copy NativeFunction] for details."
)
if self.view_inplace is not None:
assert self.view.func.signature() == self.view_inplace.func.signature()
if self.view.has_composite_implicit_autograd_kernel:
if self.view_inplace is not None:
assert self.view_inplace.has_composite_implicit_autograd_kernel, (
f"{str(self.view.func.name)} and {str(self.view_inplace.func.name)} must either"
" both have CompositeImplicitAutograd kernels, or both not have composite kernels."
)
def functions(self, *, include_copy: bool = True) -> Iterator[NativeFunction]:
yield self.view
if self.view_inplace is not None:
yield self.view_inplace
if self.view_copy is not None and include_copy:
yield self.view_copy
@property
def root_name(self) -> str:
return self.view.root_name
@property
def composite(self) -> bool:
# We currently assert that the "group" is consistent.
# If the view op is composite, then its view_inplace op is too.
return self.view.has_composite_implicit_autograd_kernel
def gets_generated_view_copy(f: NativeFunction) -> bool:
# Only aliasing (view) operators get a copy variant.
if not f.is_view_op:
return False
# We don't need to bother generating copy variants for CompositeImplicitAutograd ops,
# because we can let them decompose into base view ops.
if f.has_composite_implicit_autograd_kernel:
return False
# We also don't need to generate copy variants for inplace views.
if "inplace_view" in f.tags:
return False
return True
# Given a NativeFunction that corresponds to a view op,
# returns the OperatorName of the corresponding "copy" variant of the op.
def get_view_copy_name(f: NativeFunction) -> "OperatorName":
# Right now, when asking for a view op's corresponding "view_copy" name
# we assert for sanity that the op is allowed to have a generated view_copy variant.
    # (We can do this because "gets_generated_view_copy()" tells us which ops get a generated view_copy op).
# However, narrow_copy() already exists as an op directly in native_functions.yaml.
# I'm hardcoding narrow_copy here for now to maintain the assert,
# But we could also just get rid of the assert.
list_of_ops_with_explicit_view_copy_operators = ["narrow"]
if str(f.func.name) not in list_of_ops_with_explicit_view_copy_operators:
assert gets_generated_view_copy(f)
base_name = f"{f.func.name.name.base}_copy"
view_copy_name = OperatorName(
name=BaseOperatorName(
base=base_name, inplace=False, dunder_method=f.func.name.name.dunder_method
),
overload_name=f.func.name.overload_name,
)
return view_copy_name
# Helper functions for parsing argument lists (both inputs and returns)
def parse_returns(return_decl: str) -> Tuple[Return, ...]:
"""
Input: '()'
Output: []
"""
if return_decl == "()":
return ()
if return_decl[0] == "(" and return_decl[-1] == ")":
return_decl = return_decl[1:-1]
return tuple(Return.parse(arg) for arg in return_decl.split(", "))
# A Precompute instance consists of a map from kernel argument name
# to the list of Argument instances that should replace that
# kernel argument in the impl function.
@dataclass(frozen=True)
class Precompute:
# A map from kernel argument name -> a list of precomputed
# elements that replaces/supersedes it.
replace: Dict[str, List[Argument]]
# List of precomputed args added without replacement
add: List[Argument]
@staticmethod
def parse(src: object) -> "Precompute":
assert isinstance(src, list)
# src is a list of strings of the format:
# {kernel param name} -> {replacement decl}[, {replacement decl}, ...]
# [{add decl}[, {add decl}, ...]]
# The last line is optional and contains the precomputed parameters that are
# added without replacement.
# The other lines are parsed to get the names of which precomputed elements
# should replace which kernel arguments.
add_args = []
if " -> " not in src[-1]:
add_list = src[-1].split(",")
add_args = [Argument.parse(name.strip()) for name in add_list]
src = src[:-1]
replace = {}
for raw_replace_item in src:
assert isinstance(raw_replace_item, str)
assert " -> " in raw_replace_item, (
"precomputed parameters without replacement"
" are allowed only in the last line"
)
arg, with_list_raw = raw_replace_item.split(" -> ")
with_list = with_list_raw.split(",")
with_list_args = [Argument.parse(name.strip()) for name in with_list]
replace[arg] = with_list_args
r = Precompute(replace=replace, add=add_args)
assert r.to_list() == src, "r.to_list() != src"
return r
def __post_init__(self) -> None:
        # The template parameters in the codegen are all upper-case, so an
        # all-upper-case precomputed argument name would be ambiguous.
for a in self.add:
assert a.name.upper() != a.name
for args in self.replace.values():
for a in args:
assert a.name.upper() != a.name
def to_list(self) -> List[str]:
replace_list = []
for kernel_param, replacement_params in self.replace.items():
replacements = ", ".join(str(param) for param in replacement_params)
replace_list.append(f"{kernel_param} -> {replacements}")
return replace_list
| pytorch-master | torchgen/model.py |
import contextlib
import functools
from typing import Callable, Dict, Iterator, Optional, TypeVar, Union
import torchgen.local as local
from torchgen.model import (
BackendIndex,
DispatchKey,
NativeFunction,
NativeFunctionsGroup,
NativeFunctionsViewGroup,
)
from torchgen.utils import context, S, T
# Helper functions for defining generators on things in the model
F = TypeVar(
"F",
NativeFunction,
NativeFunctionsGroup,
NativeFunctionsViewGroup,
Union[NativeFunction, NativeFunctionsGroup],
Union[NativeFunction, NativeFunctionsViewGroup],
)
F2 = TypeVar(
"F2",
NativeFunction,
NativeFunctionsGroup,
Optional[NativeFunction],
bool,
str,
)
@contextlib.contextmanager
def native_function_manager(
g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup, NativeFunction]
) -> Iterator[None]:
if isinstance(g, NativeFunctionsGroup):
# By default, we associate all errors with structured native functions
# with the out variant. In some cases, it might be better to have
# a more specific place to hang things; if so, use
# native_function_manager again on the inside
f = g.out
elif isinstance(g, NativeFunctionsViewGroup):
# We associate errors with the view operator
f = g.view
else:
f = g
with context(lambda: f"in native_functions.yaml line {f.loc}:\n {f.func}"):
with local.parametrize(
use_const_ref_for_mutable_tensors=f.use_const_ref_for_mutable_tensors
):
yield
# Given a function that operates on NativeFunction, wrap it into a new function
# that sets some appropriate context managers for that native function.
# YOU MUST WRAP FUNCTIONS IN THIS for calls to api modules to be sound
# (you will get an error if we try to access the local variables without having
# set them).
def with_native_function(func: Callable[[F], T]) -> Callable[[F], T]:
@functools.wraps(func)
def wrapper(f: F) -> T:
with native_function_manager(f):
return func(f)
return wrapper
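# Illustrative usage sketch: wrapping a codegen helper so any error raised while
# processing `f` is annotated with its native_functions.yaml location. The
# helper name below is hypothetical; only the decorator comes from this module.
#
#     @with_native_function
#     def compute_comment(f: NativeFunction) -> str:
#         return f"// generated from {f.func}"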
def with_native_function_and(func: Callable[[F, F2], T]) -> Callable[[F, F2], T]:
@functools.wraps(func)
def wrapper(f: F, f2: F2) -> T:
# The first native_function is assumed to be the one with the appropriate context.
with native_function_manager(f):
return func(f, f2)
return wrapper
def method_with_native_function(func: Callable[[S, F], T]) -> Callable[[S, F], T]:
@functools.wraps(func)
def wrapper(slf: S, f: F) -> T:
with native_function_manager(f):
return func(slf, f)
return wrapper
# Convenience decorator for functions that explicitly take in a BackendIndex,
# instead of indirectly taking one in as a closure
def with_native_function_and_index(
func: Callable[[F, BackendIndex], T]
) -> Callable[[F, BackendIndex], T]:
@functools.wraps(func)
def wrapper(f: F, backend_index: BackendIndex) -> T:
with native_function_manager(f):
return func(f, backend_index)
return wrapper
# Convenience decorator for functions that explicitly take in a Dict of BackendIndices
def with_native_function_and_indices(
func: Callable[[F, Dict[DispatchKey, BackendIndex]], T]
) -> Callable[[F, Dict[DispatchKey, BackendIndex]], T]:
@functools.wraps(func)
def wrapper(f: F, backend_indices: Dict[DispatchKey, BackendIndex]) -> T:
with native_function_manager(f):
return func(f, backend_indices)
return wrapper
| pytorch-master | torchgen/context.py |
import contextlib
import functools
import hashlib
import os
import re
import sys
import textwrap
from argparse import Namespace
from dataclasses import fields, is_dataclass
from enum import Enum
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
Iterator,
List,
NoReturn,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
from typing_extensions import Literal
from torchgen.code_template import CodeTemplate
# Safely load fast C Yaml loader/dumper if they are available
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader # type: ignore[misc]
try:
from yaml import CSafeDumper as Dumper
except ImportError:
from yaml import SafeDumper as Dumper # type: ignore[misc]
YamlDumper = Dumper
# A custom loader for YAML that errors on duplicate keys.
# This doesn't happen by default: see https://github.com/yaml/pyyaml/issues/165
class YamlLoader(Loader):
def construct_mapping(self, node, deep=False): # type: ignore[no-untyped-def]
mapping = []
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep) # type: ignore[no-untyped-call]
assert (
key not in mapping
), f"Found a duplicate key in the yaml. key={key}, line={node.start_mark.line}"
mapping.append(key)
mapping = super().construct_mapping(node, deep=deep) # type: ignore[no-untyped-call]
return mapping
# Many of these functions share logic for defining both the definition
# and declaration (for example, the function signature is the same), so
# we organize them into one function that takes a Target to say which
# code we want.
#
# This is an OPEN enum (we may add more cases to it in the future), so be sure
# to explicitly specify with Union[Literal[Target.XXX]] what targets are valid
# for your use.
Target = Enum(
"Target",
(
# top level namespace (not including at)
"DEFINITION",
"DECLARATION",
# TORCH_LIBRARY(...) { ... }
"REGISTRATION",
# namespace { ... }
"ANONYMOUS_DEFINITION",
# namespace cpu { ... }
"NAMESPACED_DEFINITION",
"NAMESPACED_DECLARATION",
),
)
# Matches "foo" in "foo, bar" but not "foobar". Used to search for the
# occurrence of a parameter in the derivative formula
IDENT_REGEX = r"(^|\W){}($|\W)"
# TODO: Use a real parser here; this will get bamboozled
def split_name_params(schema: str) -> Tuple[str, List[str]]:
m = re.match(r"(\w+)(\.\w+)?\((.*)\)", schema)
if m is None:
raise RuntimeError(f"Unsupported function schema: {schema}")
name, _, params = m.groups()
return name, params.split(", ")
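# Illustrative sketch: note that the overload component, if any, is dropped.
#
#     >>> split_name_params("add.Tensor(Tensor self, Tensor other)")
#     ('add', ['Tensor self', 'Tensor other'])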
T = TypeVar("T")
S = TypeVar("S")
# These two functions purposely return generators in analogy to map()
# so that you don't mix up when you need to list() them
# Map over function that may return None; omit Nones from output sequence
def mapMaybe(func: Callable[[T], Optional[S]], xs: Iterable[T]) -> Iterator[S]:
for x in xs:
r = func(x)
if r is not None:
yield r
# Map over function that returns sequences and cat them all together
def concatMap(func: Callable[[T], Sequence[S]], xs: Iterable[T]) -> Iterator[S]:
for x in xs:
for r in func(x):
yield r
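# Illustrative sketch: both helpers return generators, so wrap them in list()
# to inspect the results.
#
#     >>> list(mapMaybe(lambda x: x * 10 if x % 2 == 0 else None, [1, 2, 3, 4]))
#     [20, 40]
#     >>> list(concatMap(lambda x: [x, x], ["a", "b"]))
#     ['a', 'a', 'b', 'b']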
# Conveniently add error context to exceptions raised. Lets us
# easily say that an error occurred while processing a specific
# context.
@contextlib.contextmanager
def context(msg_fn: Callable[[], str]) -> Iterator[None]:
try:
yield
except Exception as e:
# TODO: this does the wrong thing with KeyError
msg = msg_fn()
msg = textwrap.indent(msg, " ")
msg = f"{e.args[0]}\n{msg}" if e.args else msg
e.args = (msg,) + e.args[1:]
raise
# A little trick from https://github.com/python/mypy/issues/6366
# for getting mypy to do exhaustiveness checking
# TODO: put this somewhere else, maybe
def assert_never(x: NoReturn) -> NoReturn:
raise AssertionError("Unhandled type: {}".format(type(x).__name__))
@functools.lru_cache(maxsize=None)
def _read_template(template_fn: str) -> CodeTemplate:
return CodeTemplate.from_file(template_fn)
# String hash that's stable across different executions, unlike builtin hash
def string_stable_hash(s: str) -> int:
sha1 = hashlib.sha1(s.encode("latin1")).digest()
return int.from_bytes(sha1, byteorder="little")
# A small abstraction for writing out generated files and keeping track
# of what files have been written (so you can write out a list of output
# files)
class FileManager:
install_dir: str
template_dir: str
dry_run: bool
filenames: Set[str]
def __init__(self, install_dir: str, template_dir: str, dry_run: bool) -> None:
self.install_dir = install_dir
self.template_dir = template_dir
self.filenames = set()
self.dry_run = dry_run
def _write_if_changed(self, filename: str, contents: str) -> None:
old_contents: Optional[str]
try:
with open(filename, "r") as f:
old_contents = f.read()
except IOError:
old_contents = None
if contents != old_contents:
# Create output directory if it doesn't exist
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as f:
f.write(contents)
# Read from template file and replace pattern with callable (type could be dict or str).
def substitute_with_template(
self, template_fn: str, env_callable: Callable[[], Union[str, Dict[str, Any]]]
) -> str:
template_path = os.path.join(self.template_dir, template_fn)
env = env_callable()
if isinstance(env, dict):
# TODO: Update the comment reference to the correct location
if "generated_comment" not in env:
comment = "@" + "generated by torchgen/gen.py"
comment += " from {}".format(os.path.basename(template_path))
env["generated_comment"] = comment
template = _read_template(template_path)
return template.substitute(env)
elif isinstance(env, str):
return env
else:
assert_never(env)
def write_with_template(
self,
filename: str,
template_fn: str,
env_callable: Callable[[], Union[str, Dict[str, Any]]],
) -> None:
filename = "{}/{}".format(self.install_dir, filename)
        assert filename not in self.filenames, f"duplicate file write {filename}"
self.filenames.add(filename)
if not self.dry_run:
substitute_out = self.substitute_with_template(
template_fn=template_fn,
env_callable=env_callable,
)
self._write_if_changed(filename=filename, contents=substitute_out)
def write(
self,
filename: str,
        env_callable: Callable[[], Union[str, Dict[str, Any]]],
) -> None:
self.write_with_template(filename, filename, env_callable)
def write_sharded(
self,
filename: str,
items: Iterable[T],
*,
key_fn: Callable[[T], str],
env_callable: Callable[[T], Dict[str, List[str]]],
num_shards: int,
base_env: Optional[Dict[str, Any]] = None,
sharded_keys: Set[str],
) -> None:
everything: Dict[str, Any] = {"shard_id": "Everything"}
shards: List[Dict[str, Any]] = [
{"shard_id": f"_{i}"} for i in range(num_shards)
]
all_shards = [everything] + shards
if base_env is not None:
for shard in all_shards:
shard.update(base_env)
for key in sharded_keys:
for shard in all_shards:
if key in shard:
assert isinstance(
shard[key], list
), "sharded keys in base_env must be a list"
shard[key] = shard[key].copy()
else:
shard[key] = []
def merge_env(into: Dict[str, List[str]], from_: Dict[str, List[str]]) -> None:
for k, v in from_.items():
assert k in sharded_keys, f"undeclared sharded key {k}"
into[k] += v
if self.dry_run:
# Dry runs don't write any templates, so incomplete environments are fine
items = ()
for item in items:
key = key_fn(item)
sid = string_stable_hash(key) % num_shards
env = env_callable(item)
merge_env(shards[sid], env)
merge_env(everything, env)
dot_pos = filename.rfind(".")
if dot_pos == -1:
dot_pos = len(filename)
base_filename = filename[:dot_pos]
extension = filename[dot_pos:]
for shard in all_shards:
shard_id = shard["shard_id"]
self.write_with_template(
f"{base_filename}{shard_id}{extension}", filename, lambda: shard
)
# filenames is used to track compiled files, but FooEverything.cpp isn't meant to be compiled
self.filenames.discard(
f"{self.install_dir}/{base_filename}Everything{extension}"
)
def write_outputs(self, variable_name: str, filename: str) -> None:
"""Write a file containing the list of all outputs which are
generated by this script."""
content = "set({}\n {})".format(
variable_name,
"\n ".join('"' + name + '"' for name in sorted(self.filenames)),
)
self._write_if_changed(filename, content)
# Helper function to generate file manager
def make_file_manager(
options: Namespace, install_dir: Optional[str] = None
) -> FileManager:
template_dir = os.path.join(options.source_path, "templates")
install_dir = install_dir if install_dir else options.install_dir
return FileManager(
install_dir=install_dir, template_dir=template_dir, dry_run=options.dry_run
)
# Helper function to create a pretty representation for dataclasses
def dataclass_repr(
obj: Any,
indent: int = 0,
width: int = 80,
) -> str:
# built-in pprint module support dataclasses from python 3.10
if sys.version_info >= (3, 10):
from pprint import pformat
return pformat(obj, indent, width)
return _pformat(obj, indent=indent, width=width)
def _pformat(
obj: Any,
indent: int,
width: int,
curr_indent: int = 0,
) -> str:
assert is_dataclass(obj), f"obj should be a dataclass, received: {type(obj)}"
class_name = obj.__class__.__name__
# update current indentation level with class name
curr_indent += len(class_name) + 1
fields_list = [(f.name, getattr(obj, f.name)) for f in fields(obj) if f.repr]
fields_str = []
for name, attr in fields_list:
# update the current indent level with the field name
# dict, list, set and tuple also add indent as done in pprint
_curr_indent = curr_indent + len(name) + 1
if is_dataclass(attr):
str_repr = _pformat(attr, indent, width, _curr_indent)
elif isinstance(attr, dict):
str_repr = _format_dict(attr, indent, width, _curr_indent)
elif isinstance(attr, (list, set, tuple)):
str_repr = _format_list(attr, indent, width, _curr_indent)
else:
str_repr = repr(attr)
fields_str.append(f"{name}={str_repr}")
indent_str = curr_indent * " "
body = f",\n{indent_str}".join(fields_str)
return f"{class_name}({body})"
def _format_dict(
attr: Dict[Any, Any],
indent: int,
width: int,
curr_indent: int,
) -> str:
curr_indent += indent + 3
dict_repr = []
for k, v in attr.items():
k_repr = repr(k)
v_str = (
_pformat(v, indent, width, curr_indent + len(k_repr))
if is_dataclass(v)
else repr(v)
)
dict_repr.append(f"{k_repr}: {v_str}")
return _format(dict_repr, indent, width, curr_indent, "{", "}")
def _format_list(
attr: Union[List[Any], Set[Any], Tuple[Any, ...]],
indent: int,
width: int,
curr_indent: int,
) -> str:
curr_indent += indent + 1
list_repr = [
_pformat(l, indent, width, curr_indent) if is_dataclass(l) else repr(l)
for l in attr
]
start, end = ("[", "]") if isinstance(attr, list) else ("(", ")")
return _format(list_repr, indent, width, curr_indent, start, end)
def _format(
fields_str: List[str],
indent: int,
width: int,
curr_indent: int,
start: str,
end: str,
) -> str:
delimiter, curr_indent_str = "", ""
# if it exceed the max width then we place one element per line
if len(repr(fields_str)) >= width:
delimiter = "\n"
curr_indent_str = " " * curr_indent
indent_str = " " * indent
body = f", {delimiter}{curr_indent_str}".join(fields_str)
return f"{start}{indent_str}{body}{end}"
class NamespaceHelper:
"""A helper for constructing the namespace open and close strings for a nested set of namespaces.
e.g. for namespace_str torch::lazy,
prologue:
namespace torch {
namespace lazy {
epilogue:
} // namespace lazy
} // namespace torch
"""
def __init__(self, namespace_str: str, entity_name: str = "", max_level: int = 2):
# cpp_namespace can be a colon joined string such as torch::lazy
cpp_namespaces = namespace_str.split("::")
assert (
len(cpp_namespaces) <= max_level
), f"Codegen doesn't support more than {max_level} level(s) of custom namespace. Got {namespace_str}."
self.cpp_namespace_ = namespace_str
self.prologue_ = "\n".join([f"namespace {n} {{" for n in cpp_namespaces])
self.epilogue_ = "\n".join(
[f"}} // namespace {n}" for n in reversed(cpp_namespaces)]
)
self.namespaces_ = cpp_namespaces
self.entity_name_ = entity_name
@staticmethod
def from_namespaced_entity(
namespaced_entity: str, max_level: int = 2
) -> "NamespaceHelper":
"""
Generate helper from nested namespaces as long as class/function name. E.g.: "torch::lazy::add"
"""
names = namespaced_entity.split("::")
entity_name = names[-1]
namespace_str = "::".join(names[:-1])
return NamespaceHelper(
namespace_str=namespace_str, entity_name=entity_name, max_level=max_level
)
@property
def prologue(self) -> str:
return self.prologue_
@property
def epilogue(self) -> str:
return self.epilogue_
@property
def entity_name(self) -> str:
return self.entity_name_
# Only allow certain level of namespaces
def get_cpp_namespace(self, default: str = "") -> str:
"""
Return the namespace string from joining all the namespaces by "::" (hence no leading "::").
Return default if namespace string is empty.
"""
return self.cpp_namespace_ if self.cpp_namespace_ else default
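# Illustrative sketch: splitting a fully qualified entity into its namespace
# scaffolding.
#
#     >>> helper = NamespaceHelper.from_namespaced_entity("torch::lazy::add")
#     >>> helper.entity_name
#     'add'
#     >>> helper.get_cpp_namespace()
#     'torch::lazy'
#     >>> print(helper.prologue)
#     namespace torch {
#     namespace lazy {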
class OrderedSet(Generic[T]):
storage: Dict[T, Literal[None]]
def __init__(self, iterable: Optional[Iterable[T]] = None):
if iterable is None:
self.storage = {}
else:
self.storage = {k: None for k in iterable}
def __contains__(self, item: T) -> bool:
return item in self.storage
def __iter__(self) -> Iterator[T]:
return iter(self.storage.keys())
def update(self, items: "OrderedSet[T]") -> None:
self.storage.update(items.storage)
def add(self, item: T) -> None:
self.storage[item] = None
def copy(self) -> "OrderedSet[T]":
ret: OrderedSet[T] = OrderedSet()
ret.storage = self.storage.copy()
return ret
@staticmethod
def union(*args: "OrderedSet[T]") -> "OrderedSet[T]":
ret = args[0].copy()
for s in args[1:]:
ret.update(s)
return ret
def __or__(self, other: "OrderedSet[T]") -> "OrderedSet[T]":
return OrderedSet.union(self, other)
def __ior__(self, other: "OrderedSet[T]") -> "OrderedSet[T]":
self.update(other)
return self
def __eq__(self, other: object) -> bool:
if isinstance(other, OrderedSet):
return self.storage == other.storage
else:
return set(self.storage.keys()) == other
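# Illustrative sketch: OrderedSet deduplicates while preserving first-insertion
# order, unlike the built-in set().
#
#     >>> s = OrderedSet(["b", "a", "b"])
#     >>> list(s)
#     ['b', 'a']
#     >>> list(s | OrderedSet(["c", "a"]))
#     ['b', 'a', 'c']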
| pytorch-master | torchgen/utils.py |
import argparse
import os
import pathlib
import re
from collections import Counter, defaultdict, namedtuple
from typing import Dict, List, Optional, Sequence, Union
import yaml
import torchgen.api.dispatcher as dispatcher
import torchgen.dest as dest
from torchgen.api.types import DispatcherSignature
from torchgen.code_template import CodeTemplate
from torchgen.context import native_function_manager
from torchgen.gen import get_grouped_native_functions, parse_native_yaml
from torchgen.model import (
BackendIndex,
BackendMetadata,
DispatchKey,
NativeFunction,
NativeFunctionsGroup,
OperatorName,
)
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import (
concatMap,
context,
FileManager,
NamespaceHelper,
Target,
YamlLoader,
)
# Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
# Returns a Tuple of (backend_key, autograd_key, cpp_namespace, updated BackendIndex mapping)
ParsedExternalYaml = namedtuple(
"ParsedExternalYaml",
["backend_key", "autograd_key", "class_name", "cpp_namespace", "backend_indices"],
)
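# Illustrative sketch of the yaml shape parse_backend_yaml expects; the backend,
# namespace, and operator names below are only examples (the keys mirror
# valid_keys, and "backend" must parse as a valid DispatchKey):
#
#   backend: XLA
#   cpp_namespace: torch_xla
#   supported:
#   - abs
#   - add.Tensor
#   autograd:
#   - max_pool2d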
def parse_backend_yaml(
backend_yaml_path: str,
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
backend_indices: Dict[DispatchKey, BackendIndex],
) -> ParsedExternalYaml:
native_functions_map: Dict[OperatorName, NativeFunction] = {
f.func.name: f
for f in concatMap(
lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
grouped_native_functions,
)
}
with open(backend_yaml_path, "r") as f:
yaml_values = yaml.load(f, Loader=YamlLoader)
assert isinstance(yaml_values, dict)
valid_keys = [
"backend",
"class_name",
"cpp_namespace",
"extra_headers",
"supported",
"autograd",
"full_codegen",
"non_native",
"ir_gen",
]
backend = yaml_values.pop("backend", None)
assert backend is not None, 'You must provide a value for "backend"'
class_name = yaml_values.pop("class_name", None)
cpp_namespace = yaml_values.pop("cpp_namespace", None)
assert cpp_namespace is not None, 'You must provide a value for "cpp_namespace"'
# Mostly just defaulting to false to stick with LazyTensor convention.
use_out_as_primary = yaml_values.pop("use_out_as_primary", False)
assert isinstance(
use_out_as_primary, bool
), f"You must provide either True or False for use_out_as_primary. Provided: {use_out_as_primary}"
use_device_guard = yaml_values.pop("device_guard", False)
assert isinstance(
use_device_guard, bool
), f"You must provide either True or False for device_guard. Provided: {use_device_guard}"
supported = yaml_values.pop("supported", [])
if supported is None:
supported = [] # Allow an empty list of supported ops
assert isinstance(
supported, list
), f'expected "supported" to be a list, but got: {supported} (of type {type(supported)})'
supported_autograd = yaml_values.pop("autograd", [])
assert isinstance(
supported_autograd, list
), f'expected "autograd" to be a list, but got: {supported_autograd}'
# full_codegen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
full_codegen = yaml_values.pop("full_codegen", [])
supported.extend(full_codegen)
# non_native is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
non_native = yaml_values.pop("non_native", {})
# ir_gen is ignored by parse_backend_yaml, and re-parsed in gen_lazy_tensor.py
_ = yaml_values.pop("ir_gen", {})
assert (
len(yaml_values.keys()) == 0
), f'{backend_yaml_path} contains unexpected keys: {", ".join(yaml_values.keys())}. \
Only the following keys are supported: {", ".join(valid_keys)}'
def create_backend_index(
backend_ops: List[str],
dispatch_key: DispatchKey,
*,
use_out_as_primary: bool,
use_device_guard: bool,
) -> BackendIndex:
metadata: Dict[OperatorName, BackendMetadata] = {}
for op in backend_ops:
op_name = OperatorName.parse(op)
assert (
op_name in native_functions_map
), f"Found an invalid operator name: {op_name}"
# See Note [External Backends Follow Dispatcher API]
kernel_name = dispatcher.name(native_functions_map[op_name].func)
# TODO: allow structured external backends later.
m = BackendMetadata(
kernel=kernel_name, structured=False, cpp_namespace=cpp_namespace
)
metadata[op_name] = m
return BackendIndex(
dispatch_key=dispatch_key,
use_out_as_primary=use_out_as_primary,
external=True,
device_guard=use_device_guard,
index=metadata,
)
backend_key: Optional[DispatchKey] = None
if len(supported) > 0:
with context(
lambda: f'The provided value for "backend" must be a valid DispatchKey, but got {backend}.'
):
backend_key = DispatchKey.parse(backend)
backend_idx = create_backend_index(
supported,
backend_key,
use_out_as_primary=use_out_as_primary,
use_device_guard=use_device_guard,
)
assert backend_key not in backend_indices
backend_indices[backend_key] = backend_idx
autograd_key: Optional[DispatchKey] = None
if len(supported_autograd) > 0:
with context(
lambda: f'The "autograd" key was specified, which indicates that you would like to override \
the behavior of autograd for some operators on your backend. However "Autograd{backend}" is not a valid DispatchKey.'
):
autograd_key = DispatchKey.parse(f"Autograd{backend}")
autograd_idx = create_backend_index(
supported_autograd,
autograd_key,
use_out_as_primary=use_out_as_primary,
use_device_guard=use_device_guard,
)
assert autograd_key not in backend_indices
backend_indices[autograd_key] = autograd_idx
for g in grouped_native_functions:
if isinstance(g, NativeFunction):
forward_kernels = (
[]
if backend_key is None
else [
m
for m in [backend_indices[backend_key].get_kernel(g)]
if m is not None
]
)
backward_kernels = (
[]
if autograd_key is None
else [
m
for m in [backend_indices[autograd_key].get_kernel(g)]
if m is not None
]
)
else:
forward_kernels = (
[]
if backend_key is None
else [
m
for m in [
backend_indices[backend_key].get_kernel(f)
for f in g.functions()
]
if m is not None
]
)
backward_kernels = (
[]
if autograd_key is None
else [
m
for m in [
backend_indices[autograd_key].get_kernel(f)
for f in g.functions()
]
if m is not None
]
)
forward_kernels = [f for f in forward_kernels if f is not None]
backward_kernels = [f for f in backward_kernels if f is not None]
assert (
len(forward_kernels) == 0 or len(backward_kernels) == 0
), f'Currently, all variants of an op must either be registered to a backend key, or to a backend\'s \
autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! \
{forward_kernels[0].kernel} is listed under "supported", but {backward_kernels[0].kernel} is listed under "autograd".'
return ParsedExternalYaml(
backend_key, autograd_key, class_name, cpp_namespace, backend_indices
)
def error_on_missing_kernels(
native_functions: Sequence[NativeFunction],
backend_indices: Dict[DispatchKey, BackendIndex],
backend_key: DispatchKey,
autograd_key: Optional[DispatchKey],
class_name: str,
kernel_defn_file_path: str,
full_codegen: Optional[List[OperatorName]] = None,
) -> None:
try:
with open(kernel_defn_file_path, "r") as f:
backend_defns = f.read()
except IOError:
raise AssertionError(
f"Unable to read from the specified impl_path file: {kernel_defn_file_path}"
)
if full_codegen is None:
full_codegen = []
    expected_backend_op_names: List[OperatorName] = list(
        backend_indices[backend_key].index.keys()
    ) + (
        []
        if autograd_key is None
        else list(backend_indices[autograd_key].index.keys())
    )
expected_backend_native_funcs: List[NativeFunction] = [
f
for f in native_functions
if f.func.name in expected_backend_op_names and f.func.name not in full_codegen
]
expected_backend_kernel_name_counts: Dict[str, List[NativeFunction]] = defaultdict(
list
)
for native_f in expected_backend_native_funcs:
expected_backend_kernel_name_counts[dispatcher.name(native_f.func)].append(
native_f
)
# This just looks for lines containing "foo(", and assumes that the kernel foo has been implemented.
# It might cause false negatives (we won't catch all cases), but that's ok - if we catch a missing kernel
# here, then we get a nicer error message. If we miss it, you get a linker error.
kernel_defn_regex = rf"{class_name}::\s*([\w\d]*)\("
actual_backend_kernel_name_counts = Counter(
re.findall(kernel_defn_regex, backend_defns)
)
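# Illustrative example (hypothetical class and kernel names): with class_name == "XLANativeFunctions",
# a definition line such as `at::Tensor XLANativeFunctions::add(const at::Tensor& self, ...)` matches
# the regex above and contributes one count for the kernel name "add".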
missing_kernels_err_msg = ""
for expected_name, funcs in expected_backend_kernel_name_counts.items():
expected_overload_count = len(funcs)
actual_overload_count = actual_backend_kernel_name_counts[expected_name]
if expected_overload_count != actual_overload_count:
def create_decl(f: NativeFunction) -> str:
with native_function_manager(f):
return DispatcherSignature.from_schema(f.func).decl()
expected_schemas_str = "\n".join([create_decl(f) for f in funcs])
missing_kernels_err_msg += f"""
{class_name} is missing a kernel definition for {expected_name}. We found {actual_overload_count} kernel(s) with that name,
but expected {expected_overload_count} kernel(s). The expected function schemas for the missing operator are:
{expected_schemas_str}
"""
assert missing_kernels_err_msg == "", missing_kernels_err_msg
def main() -> None:
parser = argparse.ArgumentParser(description="Generate backend stub files")
parser.add_argument(
"-s",
"--source_yaml",
help="path to source yaml file containing operator external definitions",
)
parser.add_argument("-o", "--output_dir", help="output directory")
parser.add_argument("--dry_run", type=bool, default=False, help="output directory")
parser.add_argument(
"--impl_path",
type=str,
default=None,
help="path to the source C++ file containing kernel definitions",
)
options = parser.parse_args()
run(options.source_yaml, options.output_dir, options.dry_run, options.impl_path)
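# Example invocation (hypothetical paths, shown only as a sketch of the flags defined above):
#   python -m torchgen.gen_backend_stubs -s my_backend_native_functions.yaml -o generated/ \
#       --impl_path my_backend_kernels.cpp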
def gen_dispatchkey_nativefunc_headers(
fm: FileManager,
class_name: str,
cpp_namespace: str,
backend_indices: Dict[DispatchKey, BackendIndex],
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
backend_dispatch_key: DispatchKey,
autograd_dispatch_key: Optional[DispatchKey],
backend_name: str = "",
) -> None:
assert class_name is not None
generated_comment = (
"Autogenerated file by gen_backend_stubs.py. Do not edit directly!"
)
# Convert to a set first to remove duplicate kernel names.
# Backends are allowed to repeat kernel names; only generate the declaration once!
# Sort for deterministic output.
backend_declarations = list(
sorted(
set(
concatMap(
lambda f: dest.compute_native_function_declaration(
f, backend_indices[backend_dispatch_key]
),
grouped_native_functions,
)
)
)
)
autograd_declarations = list(
sorted(
set(
concatMap(
lambda f: []
if autograd_dispatch_key is None
else dest.compute_native_function_declaration(
f, backend_indices[autograd_dispatch_key]
),
grouped_native_functions,
)
)
)
)
ns_helper = NamespaceHelper(cpp_namespace)
fm.write_with_template(
f"{backend_dispatch_key}NativeFunctions.h",
"DispatchKeyNativeFunctions.h",
lambda: {
"generated_comment": generated_comment,
"namespace_prologue": ns_helper.prologue,
"class_name": class_name,
"namespace_epilogue": ns_helper.epilogue,
"dispatch_declarations": backend_declarations + autograd_declarations,
"BackendName": backend_name,
"DispatchKey": backend_dispatch_key,
},
)
def gen_dispatcher_registrations(
fm: FileManager,
output_dir: str,
class_name: str,
backend_indices: Dict[DispatchKey, BackendIndex],
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
backend_dispatch_key: DispatchKey,
dispatch_key: DispatchKey,
selector: "SelectiveBuilder",
# build_in_tree is true for lazy TS backend and affects include paths, not used for external backends
build_in_tree: bool = False,
per_operator_headers: bool = False,
backend_name: str = "",
eager_registration: bool = True,
) -> None:
headers = [
f"{output_dir}/{backend_dispatch_key}NativeFunctions.h",
]
if build_in_tree:
external_backend_headers_str = "\n".join(f"#include <{h}>" for h in headers)
else:
external_backend_headers_str = "\n".join(f'#include "{h}"' for h in headers)
assert class_name is not None
backend_index = backend_indices[dispatch_key]
dispatch_registrations_body = list(
concatMap(
dest.RegisterDispatchKey(
backend_index,
Target.REGISTRATION,
selector,
rocm=False,
class_method_name=f"{class_name}",
skip_dispatcher_op_registration=False,
),
grouped_native_functions,
)
)
newline = "\n"
ns_helper = NamespaceHelper(namespace_str="at")
deferred_dispatch_registrations = ""
static_init_dispatch_registrations = ""
if eager_registration:
static_template = CodeTemplate(
"""\
TORCH_LIBRARY_IMPL(aten, $dispatch_key, m) {
$dispatch_registrations_body
};"""
)
static_init_dispatch_registrations = static_template.substitute(
dispatch_key=dispatch_key,
dispatch_registrations_body=dispatch_registrations_body,
)
else:
deferred_template = CodeTemplate(
"""\
TORCH_API void Register${backend_name}${dispatch_key}NativeFunctions() {
static auto m = MAKE_TORCH_LIBRARY_IMPL(aten, $dispatch_key);
$dispatch_registrations_body
}"""
)
deferred_dispatch_registrations = deferred_template.substitute(
backend_name=backend_name,
dispatch_key=dispatch_key,
dispatch_registrations_body=dispatch_registrations_body,
)
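# For illustration (assuming backend_name == "TorchScript" and a "Lazy" dispatch key), the deferred
# template above expands to a function named RegisterTorchScriptLazyNativeFunctions(), which a backend
# can call to perform the registrations on demand instead of at static-initialization time.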
fm.write_with_template(
f"Register{dispatch_key}.cpp",
"RegisterDispatchKey.cpp",
lambda: {
"extra_cuda_headers": "",
"external_backend_headers": external_backend_headers_str,
"ops_headers": "#include <ATen/Functions.h>"
if not per_operator_headers
else "",
"DispatchKey": dispatch_key,
"dispatch_namespace": dispatch_key.lower(),
"dispatch_headers": dest.gen_registration_headers(
backend_index, per_operator_headers=per_operator_headers, rocm=False
),
"dispatch_definitions": fm.substitute_with_template(
"RegisterDispatchDefinitions.ini",
lambda: {
"ns_prologue": ns_helper.prologue,
"ns_epilogue": ns_helper.epilogue,
"static_init_dispatch_registrations": static_init_dispatch_registrations,
"deferred_dispatch_registrations": deferred_dispatch_registrations,
"dispatch_helpers": dest.gen_registration_helpers(backend_index),
"dispatch_namespace": dispatch_key.lower(),
"dispatch_namespaced_definitions": "",
"dispatch_anonymous_definitions": list(
concatMap(
dest.RegisterDispatchKey(
backend_index,
Target.ANONYMOUS_DEFINITION,
selector,
rocm=False,
class_method_name=f"{class_name}",
skip_dispatcher_op_registration=False,
),
grouped_native_functions,
)
),
},
).split(newline),
},
)
def run(
source_yaml: str, output_dir: str, dry_run: bool, impl_path: Optional[str] = None
) -> None:
# Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
pytorch_root = pathlib.Path(__file__).parent.parent.absolute()
template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")
def make_file_manager(install_dir: str) -> FileManager:
return FileManager(
install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
)
fm = make_file_manager(output_dir)
native_yaml_path = os.path.join(
pytorch_root, "aten/src/ATen/native/native_functions.yaml"
)
tags_yaml_path = os.path.join(pytorch_root, "aten/src/ATen/native/tags.yaml")
parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
native_functions, backend_indices = (
parsed_yaml.native_functions,
parsed_yaml.backend_indices,
)
grouped_native_functions = get_grouped_native_functions(native_functions)
parsed_backend_yaml = parse_backend_yaml(
source_yaml, grouped_native_functions, backend_indices
)
backend_key = parsed_backend_yaml.backend_key
autograd_key = parsed_backend_yaml.autograd_key
cpp_namespace = parsed_backend_yaml.cpp_namespace
class_name = parsed_backend_yaml.class_name
backend_indices = parsed_backend_yaml.backend_indices
selector = SelectiveBuilder.get_nop_selector()
if backend_key is None:
# This could be useful if a backend wants to quickly set up a noop yaml file but doesn't have any kernels ready yet.
return
if class_name is None:
# class_name is an optional argument to backend yaml file.
# if specified it allows an external backend to override
# the name of the class that all generated kernel definitions live under.
# if not specified, it defaults to the backend index's native_function_class_name().
class_name = backend_indices[backend_key].native_function_class_name()
assert class_name is not None
if impl_path is not None:
error_on_missing_kernels(
native_functions,
backend_indices,
backend_key,
autograd_key,
class_name,
impl_path,
)
gen_dispatchkey_nativefunc_headers(
fm,
class_name,
cpp_namespace,
backend_indices,
grouped_native_functions,
backend_key,
autograd_key,
)
for dispatch_key in (
[backend_key] if autograd_key is None else [backend_key, autograd_key]
):
gen_dispatcher_registrations(
fm,
output_dir,
class_name,
backend_indices,
grouped_native_functions,
backend_key,
dispatch_key,
selector,
)
if __name__ == "__main__":
main()
| pytorch-master | torchgen/gen_backend_stubs.py |
import argparse
import os
import pathlib
import re
from collections import Counter, namedtuple
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import yaml
import torchgen.dest as dest
from torchgen.api.lazy import setValueT
from torchgen.api.types import BaseCppType
from torchgen.dest.lazy_ir import GenLazyIR, GenTSLazyIR
from torchgen.gen import get_grouped_native_functions, parse_native_yaml
from torchgen.model import NativeFunction, NativeFunctionsGroup, OperatorName
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import concatMap, FileManager, NamespaceHelper, YamlLoader
from .gen_backend_stubs import (
error_on_missing_kernels,
gen_dispatcher_registrations,
gen_dispatchkey_nativefunc_headers,
parse_backend_yaml,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Lazy Tensor Codegen
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# Overview
# ~~~~~~~~
#
# This codegen script builds on existing data models and helpers used
# by all ATen backends, and adds new functionality specific to lazy
# tensor backends.
#
# Inputs:
# - <backend>_native_functions.yaml: controls which operators are
# supported by the backend.
#
# Outputs:
# (for all backends)
# <DispatchKey>Ir.h defines Lazy IR classes to be constructed during tracing
# - opt-in: also generate 'lowering' methods for the TorchScript backend only
# <DispatchKey>NativeFunctions.cpp defines implementations of native functions which perform lazy tracing
# - opt-in: 'full_codegen' section of backend yaml; 'supported' section omits these implementations
# <DispatchKey>NativeFunctions.h declares implementations of native functions for both 'supported' and 'full_codegen'
# ops
#
# Register<DispatchKey>.cpp registers all op implementations with the dispatcher
# RegisterAutograd<DispatchKey>.cpp registers all autograd implementations with the dispatcher
#
# Validation Helpers:
# - Shape Inference: errs if any ops in backend yaml require shape inference not provided by meta kernels or
# implementations in torch/csrc/lazy/core/shape_inference.*
# - native function impls: errs if any 'supported' ops do not have an implementation defined in the backend
# (non-codegen) implementation file
#
#
# About the Data Model
# ~~~~~~~~~~~~~~~~~~~~
#
# Modeled after ATen codegen, the first step is to parse yaml and build a data model for the operators
# we care about. In this case, the <backend>_native_functions yaml defines a subset of the core operators
# (defined in more detail in the main native_functions.yaml), which will be supported by your backend.
# Backends can list ops in two categories:
# - `supported` ops require hand-implementations but still get codegenned declarations and registrations
# - `full_codegen` ops get implementations (and IR classes) generated too
#
# Each native function is modeled as an object with a schema, and each schema has objects representing their
# arguments. Much of the codegen is manipulation of the arguments and their types. For example, lazy tensor
# backends need to transform 'at::Tensor' arguments into 'lazy::Value' objects, as well as replacing reference
# types (stringref) with actual string objects, and this is done by manipulating the data model objects.
# - see api/lazy.py for the lazy data model
#
# Once the data model is set up, the rest of this script processes a number of templates for output CPP file
# and fills in the template values using helpers in `dest/lazy_ir.py` and `dest/lazy_ts_lowering.py`. These
# helpers mostly iterate over functions and their arguments, outputting different c++ snippets.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
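# For orientation, the backend yaml consumed here is a plain mapping. A minimal sketch with
# illustrative values (the authoritative key set is whatever parse_backend_yaml in
# gen_backend_stubs.py and parse_native_functions_keys below accept):
#
#   backend: Lazy
#   cpp_namespace: torch::lazy
#   full_codegen:
#     - abs
#     - add.Tensor
#   supported:
#     - empty.memory_format
#   non_native: []
#   ir_gen: []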
# Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
# Returns a Tuple of (backend_key, autograd_key, cpp_namespace, updated BackendIndex mapping, full_codegen)
ParsedExternalYaml = namedtuple(
"ParsedExternalYaml",
["backend_key", "autograd_key", "cpp_namespace", "backend_indices", "full_codegen"],
)
def parse_native_functions_keys(
backend_yaml_path: str,
grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
) -> Tuple[List[OperatorName], List[Any], List[OperatorName]]:
native_functions_map: Dict[OperatorName, NativeFunction] = {
f.func.name: f
for f in concatMap(
lambda f: [f] if isinstance(f, NativeFunction) else list(f.functions()),
grouped_native_functions,
)
}
with open(backend_yaml_path, "r") as f:
yaml_values = yaml.load(f, Loader=YamlLoader)
assert isinstance(yaml_values, dict)
full_codegen = yaml_values.pop("full_codegen", [])
non_native = yaml_values.pop("non_native", [])
ir_gen = yaml_values.pop("ir_gen", [])
assert isinstance(full_codegen, list)
assert isinstance(non_native, list)
assert isinstance(ir_gen, list)
full_codegen_opnames = [OperatorName.parse(name) for name in full_codegen]
ir_gen_opnames = [OperatorName.parse(name) for name in ir_gen]
return full_codegen_opnames, non_native, ir_gen_opnames
def validate_shape_inference_header(
shape_inference_hdr: str, expected_shape_infr_decls: List[str]
) -> None:
try:
with open(shape_inference_hdr, "r") as f:
shape_infr_decls = f.read()
shape_infr_decl_lines = set(shape_infr_decls.split("\n"))
except IOError:
raise AssertionError(
f"Unable to read from the specified shape_inference_hdr file: {shape_inference_hdr}"
)
shape_infr_regex = r"compute_shape_(\w+)"
actual_shape_infr_name_counts = Counter(
re.findall(shape_infr_regex, shape_infr_decls)
)
# TODO(whc) add a check for shape inference functions that have meta kernels implemented and should be retired.
missing_decls = [
decl for decl in expected_shape_infr_decls if decl not in shape_infr_decl_lines
]
if missing_decls:
raise Exception(
f"""Missing shape inference function.\n
Please declare this function in {shape_inference_hdr}:\n
and implement it in the corresponding shape_inference.cpp file.\n
{os.linesep.join(missing_decls)}"""
)
# Some helper functions for the codegen.
def get_ltc_helper_fns() -> str:
return """\
at::Tensor to_meta(const at::Tensor& tensor) {
// undefined tensors can't be converted to the meta device, since they don't have sizes/strides
if (!tensor.defined()) return tensor;
auto out = at::native::empty_strided_meta(tensor.sizes(), tensor.strides(), \
/*dtype=*/c10::make_optional(tensor.scalar_type()), /*layout=*/c10::make_optional(tensor.layout()), \
/*device=*/c10::make_optional(c10::Device(c10::kMeta)), /*pin_memory=*/c10::nullopt);
// needs to handle wrapped numbers, so dtype promotion works properly.
if (tensor.unsafeGetTensorImpl()->is_wrapped_number()) {
out.unsafeGetTensorImpl()->set_wrapped_number(true);
}
return out;
}
c10::optional<at::Tensor> to_meta(const c10::optional<at::Tensor>& tensor) {
if (tensor.has_value()) {
return to_meta(*tensor);
}
return c10::nullopt;
}
std::vector<at::Tensor> to_meta(const at::TensorList& t_list) {
std::vector<at::Tensor> outs;
outs.reserve(t_list.size());
for (const auto& i : c10::irange(t_list.size())) {
outs.push_back(to_meta(t_list[i]));
}
return outs;
}
"""
class default_args:
node_base: str = "Node"
node_base_hdr: Optional[str] = None
shape_inference_hdr: str = "torch/csrc/lazy/core/shape_inference.h"
tensor_class: str = "torch::lazy::LazyTensor"
tensor_class_hdr: str = "torch/csrc/lazy/core/tensor.h"
lazy_ir_generator: Type[GenLazyIR] = GenLazyIR
backend_name: str = "TorchScript"
def main() -> None:
parser = argparse.ArgumentParser(description="Generate Lazy Tensor backend files")
parser.add_argument(
"-s",
"--source_yaml",
help="path to source yaml file containing operator external definitions",
)
parser.add_argument("-o", "--output_dir", help="output directory")
parser.add_argument("--dry_run", type=bool, default=False, help="output directory")
parser.add_argument(
"--impl_path",
type=str,
default=None,
help="path to the source C++ file containing kernel definitions",
)
parser.add_argument(
"--gen_ts_lowerings",
action="store_true",
help="Generate TorchScript lowerings in addition to Lazy IR and NativeFunctions",
)
parser.add_argument(
"--node_base",
type=str,
default=default_args.node_base,
help="Name of backend specific custom Lazy IR Node base class",
)
parser.add_argument(
"--node_base_hdr",
type=str,
default=default_args.node_base_hdr,
help="Path to header file defining custom Lazy IR Node base class",
)
parser.add_argument(
"--shape_inference_hdr",
type=str,
default=default_args.shape_inference_hdr,
help="Path to header file defining custom Lazy shape inference functions",
)
parser.add_argument(
"--tensor_class",
type=str,
default=default_args.tensor_class,
help="Name of backend specific custom Lazy Tensor class",
)
parser.add_argument(
"--tensor_class_hdr",
type=str,
default=default_args.tensor_class_hdr,
help="Path to header file defining custom Lazy Tensor class",
)
parser.add_argument(
"--backend_name",
type=str,
default=default_args.backend_name,
help="Name of the backend to generate",
)
options = parser.parse_args()
# Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_lazy_tensor.py
torch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
aten_path = str(torch_root / "aten" / "src" / "ATen")
lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator
if options.gen_ts_lowerings:
lazy_ir_generator = GenTSLazyIR
run_gen_lazy_tensor(
aten_path,
options.source_yaml,
options.output_dir,
options.dry_run,
options.impl_path,
options.node_base,
options.node_base_hdr,
options.tensor_class,
options.tensor_class_hdr,
options.shape_inference_hdr,
lazy_ir_generator,
options.backend_name,
)
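# Example invocation (hypothetical paths, sketching the flags defined above):
#   python -m torchgen.gen_lazy_tensor -s ts_native_functions.yaml -o generated/ --gen_ts_lowerings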
def run_gen_lazy_tensor(
aten_path: str,
source_yaml: str,
output_dir: str,
dry_run: bool,
impl_path: Optional[str],
node_base: str = default_args.node_base,
node_base_hdr: Optional[str] = default_args.node_base_hdr,
tensor_class: str = default_args.tensor_class,
tensor_class_hdr: str = default_args.tensor_class_hdr,
shape_inference_hdr: str = default_args.shape_inference_hdr,
lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator,
# build_in_tree is true for TS backend and affects include paths
build_in_tree: bool = False,
# per_operator_headers changes whether ATen/Functions.h or individual operator headers are used
# it must match how ATen was built
per_operator_headers: bool = False,
backend_name: str = default_args.backend_name,
gen_forced_fallback_code: bool = False,
# the following arguments are temporary customization points for xla backend migration.
# do not rely on them otherwise, they should be removed once migration is complete
backend_namespace: str = "torch::lazy",
get_tensorlist: str = "GetTensorList",
get_tensor_or_wrap_number: str = "GetLtcTensorOrCreateForWrappedNumber",
try_get_tensor: str = "TryGetLtcTensor",
metrics_counter: str = 'TORCH_LAZY_FN_COUNTER("lazy::")',
create_tensor: str = "LazyTensor::Create",
create_from_first_tensor: bool = False,
create_aten_from_ltc_tensor: str = "torch::lazy::CreateAtenFromLtcTensor",
tuple_aten_from_ltc_tensors: str = "torch::lazy::TupleAtenFromLtcTensors",
lazy_value_class: str = "torch::lazy::Value",
lazy_tensor_ptr: str = "LazyTensorPtr",
get_device_fn: str = "torch::lazy::GetBackendDevice",
) -> None:
lv_tokens = lazy_value_class.split("::")
lv_class = lv_tokens[-1]
lv_ns = "::".join(lv_tokens[:-1])
setValueT(BaseCppType(lv_ns, lv_class))
template_dir = os.path.join(aten_path, "templates")
def make_file_manager(install_dir: str) -> FileManager:
return FileManager(
install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
)
fm = make_file_manager(output_dir)
native_yaml_path = os.path.join(aten_path, "native/native_functions.yaml")
tags_yaml_path = os.path.join(aten_path, "native/tags.yaml")
parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
native_functions, backend_indices = (
parsed_yaml.native_functions,
parsed_yaml.backend_indices,
)
grouped_native_functions = get_grouped_native_functions(native_functions)
def sort_native_function(f: Union[NativeFunctionsGroup, NativeFunction]) -> str:
"""
We sort the native functions because of the note in concat_map_codegen.
TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.
"""
func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
return str(func.name.name)
grouped_native_functions = sorted(
grouped_native_functions, key=sort_native_function
)
parsed_backend_yaml = parse_backend_yaml(
source_yaml, grouped_native_functions, backend_indices
)
backend_key = parsed_backend_yaml.backend_key
autograd_key = parsed_backend_yaml.autograd_key
cpp_namespace = parsed_backend_yaml.cpp_namespace
backend_indices = parsed_backend_yaml.backend_indices
# the following 3 keys are all processed differently
# for full_codegen, we generate IR, kernels, etc
# for ir_gen, we generate only IR
# non_native is used to register kernels not declared in
# native_functions.yaml
full_codegen, non_native, ir_gen = parse_native_functions_keys(
source_yaml, grouped_native_functions
)
def concat_map_codegen(
func: Callable[[NativeFunction], Sequence[str]],
xs: Iterable[Union[NativeFunctionsGroup, NativeFunction]],
ops_list: List[OperatorName] = full_codegen,
) -> Iterator[str]:
"""
We code-gen for the functional variant, which is all we need for IR classes/lowerings/shape inferences, but we
only code-gen additional entries for the inplace variant for the native functions.
"""
for x in xs:
fs = list(x.functions()) if isinstance(x, NativeFunctionsGroup) else [x]
for f in fs:
if f.func.name in ops_list:
for r in func(f):
yield r
selector = SelectiveBuilder.get_nop_selector()
assert backend_key is not None
class_name = backend_indices[backend_key].native_function_class_name()
if impl_path is not None:
error_on_missing_kernels(
native_functions,
backend_indices,
backend_key,
autograd_key,
class_name,
impl_path,
full_codegen,
)
""" Validate Shape Inference Definitions
Generated lazy native functions all perform shape inference, by first using a meta:: kernel
if available for that op, and otherwise using a 'compute_shape_{op}' function instead. The generator
knows the call signature for compute_shape_{op} because it matches the native function (and meta::) signature,
so it just has to check whether the op is structured and generate a call for one or the other. It's up to the dev
to supply the missing compute_shape_{op} function, but the codegen at least warns you about this and provides
the expected signature which can be copy-pasted into shape_inference.h.
compute_shape_{op} functions are handwritten and should be replaced over time as ops get ported
to structured kernels.
See torch/csrc/lazy/core/shape_inference.cpp #READ THIS! for more information.
"""
if shape_inference_hdr is not None:
expected_shape_infr_decls = list(
concat_map_codegen(
dest.GenLazyShapeInferenceDefinition(
backend_indices[backend_key], tensor_class
),
grouped_native_functions,
)
)
validate_shape_inference_header(shape_inference_hdr, expected_shape_infr_decls)
assert class_name is not None
# Generate nativefunction declarations
# Note, eager registrations is set to False for the lazy TS backend as another LTC backend
# may want to register their own lazy kernels instead of registering the TS ones.
# The registration will lazily happen when init_ts_backend is called.
gen_dispatchkey_nativefunc_headers(
fm,
class_name,
cpp_namespace,
backend_indices,
grouped_native_functions,
backend_key,
autograd_key,
backend_name,
)
# Generate Dispatcher registrations which hook up the nativefunctions
for dispatch_key in (
[backend_key] if autograd_key is None else [backend_key, autograd_key]
):
gen_dispatcher_registrations(
fm,
output_dir,
class_name,
backend_indices,
grouped_native_functions,
backend_key,
dispatch_key,
selector,
build_in_tree=build_in_tree,
per_operator_headers=per_operator_headers,
backend_name=backend_name,
eager_registration=False,
)
# Generate native function impls that build IR nodes
ns_helper = NamespaceHelper(cpp_namespace)
fm.write_with_template(
f"{backend_key}NativeFunctions.cpp",
"DispatchKeyNativeFunctions.cpp",
lambda: {
"includes": [
f"#include <{path}>"
for path in [
tensor_class_hdr,
shape_inference_hdr,
"ATen/Functions.h",
"ATen/native/TensorConversions.h",
"ATen/NativeFunctions.h",
"ATen/CompositeExplicitAutogradNonFunctionalFunctions.h",
"ATen/MetaFunctions.h",
"ATen/Operators.h",
"ATen/native/CPUFallback.h",
"torch/csrc/lazy/core/ir_builder.h",
"torch/csrc/lazy/core/lazy_graph_executor.h",
"torch/csrc/lazy/core/metrics.h",
"torch/csrc/lazy/core/shape.h",
f"{output_dir}/{backend_key}NativeFunctions.h",
f"{output_dir}/LazyIr.h",
]
+ (
["torch/csrc/lazy/ts_backend/ts_eager_fallback.h"]
if gen_forced_fallback_code
else []
)
],
"helper_fns": get_ltc_helper_fns(),
"native_functions_include": "",
"namespace_prologue": ns_helper.prologue,
"namespace_epilogue": ns_helper.epilogue,
"native_function_definitions": list(
concat_map_codegen(
dest.GenLazyNativeFuncDefinition(
f"{backend_key}NativeFunctions",
backend_indices[backend_key],
tensor_class,
gen_forced_fallback_code,
backend_namespace,
get_tensorlist,
get_tensor_or_wrap_number,
try_get_tensor,
metrics_counter,
create_tensor,
create_from_first_tensor,
create_aten_from_ltc_tensor,
tuple_aten_from_ltc_tensors,
lazy_tensor_ptr,
get_device_fn,
),
grouped_native_functions,
)
),
},
)
# Generate IR node classes
lazy_ir_obj = lazy_ir_generator(
backend_indices[backend_key], backend_name, node_base
)
fm.write_with_template(
"LazyIr.h",
"LazyIr.h",
lambda: {
"lazy_ir_sysinc": [
f"#include <{path}>"
for path in [
"ATen/core/Formatting.h",
"c10/core/ScalarType.h",
"c10/util/Optional.h",
"torch/csrc/lazy/core/hash.h",
"torch/csrc/lazy/core/ir.h",
"torch/csrc/lazy/core/shape.h",
"vector",
]
],
"lazy_ir_inc": [f'#include "{node_base_hdr}"']
if node_base_hdr is not None
else [],
"ir_declarations": list(
concat_map_codegen(
lazy_ir_obj, grouped_native_functions, full_codegen + ir_gen
)
),
"namespace_prologue": ns_helper.prologue,
"namespace_epilogue": ns_helper.epilogue,
},
)
# Generate Non Native IR Node classes
fm.write_with_template(
"LazyNonNativeIr.h",
"LazyNonNativeIr.h",
lambda: {
"lazy_non_native_ir_inc": [
f"#include <{path}>"
for path in [
"torch/csrc/lazy/core/ir.h",
"torch/csrc/lazy/core/ir_builder.h",
"torch/csrc/lazy/core/internal_ops/ltc_ops.h",
"torch/csrc/lazy/core/shape_inference.h",
]
+ ([node_base_hdr] if node_base_hdr else [])
if path
],
"non_native_ir_nodes": dest.generate_non_native_lazy_ir_nodes(
non_native, lazy_ir_obj
),
"namespace_prologue": ns_helper.prologue,
"namespace_epilogue": ns_helper.epilogue,
},
)
if __name__ == "__main__":
main()
| pytorch-master | torchgen/gen_lazy_tensor.py |
from dataclasses import dataclass
from typing import Dict, List, Optional, Set, Tuple
import yaml
from torchgen.model import NativeFunction
from torchgen.selective_build.operator import (
merge_debug_info,
merge_operator_dicts,
SelectiveBuildOperator,
strip_operator_overload_name,
)
# A SelectiveBuilder holds information extracted from the selective build
# YAML specification.
#
# It includes information about the build's selectivity, the debug_info
# associated with this selective build (opaque string), and the set of
# operators that should be included in the build.
#
@dataclass(frozen=True)
class SelectiveBuilder:
# If true, then the build is not selective, and includes all
# operators.
include_all_operators: bool
# Debug Information at the selective/custom build level.
_debug_info: Optional[Tuple[str, ...]]
# A dictionary of operator -> operator metadata.
operators: Dict[str, SelectiveBuildOperator]
# A dictionary of selected kernel tags and dtypes. Typically a
# PyTorch Operator Kernel (function) may have many code paths
# that are specialized for many Tensor dtypes, so it's not
# one per kernel function, but there could be many per kernel
# function. The tag isn't a kernel function name, but some fragment
# of the kernel function implementation itself.
kernel_metadata: Dict[str, List[str]]
# A set of all the custom torch bind classes used by the selected models
# Stored as a set internally to remove duplicates proactively, but written
# as a list to yamls
custom_classes: Set[str]
# A set of all the build features used by the selected models
# Stored as a set internally to remove duplicates proactively, but written
# as a list to yamls
build_features: Set[str]
# If true, then fragments for all dtypes for all kernel functions
# are included as well as all custom classes. This is typically set when any one of the
# operator lists is generated from a mechanism other than
# tracing based selective build.
include_all_non_op_selectives: bool
@staticmethod
def get_nop_selector() -> "SelectiveBuilder":
return SelectiveBuilder.from_yaml_dict({"include_all_operators": True})
@staticmethod
def from_yaml_dict(data: Dict[str, object]) -> "SelectiveBuilder":
valid_top_level_keys = {
"include_all_non_op_selectives",
"include_all_operators",
"debug_info",
"operators",
"kernel_metadata",
"custom_classes",
"build_features",
}
top_level_keys = set(data.keys())
if len(top_level_keys - valid_top_level_keys) > 0:
raise Exception(
"Got unexpected top level keys: {}".format(
",".join(top_level_keys - valid_top_level_keys),
)
)
include_all_operators = data.get("include_all_operators", False)
assert isinstance(include_all_operators, bool)
debug_info = None
if "debug_info" in data:
di_list = data["debug_info"]
assert isinstance(di_list, list)
debug_info = tuple(map(lambda x: str(x), di_list))
operators = {}
operators_dict = data.get("operators", {})
assert isinstance(operators_dict, dict)
for (k, v) in operators_dict.items():
operators[k] = SelectiveBuildOperator.from_yaml_dict(k, v)
kernel_metadata = {}
kernel_metadata_dict = data.get("kernel_metadata", {})
assert isinstance(kernel_metadata_dict, dict)
for (k, v) in kernel_metadata_dict.items():
kernel_metadata[str(k)] = list(map(lambda dtype: str(dtype), v))
custom_classes = data.get("custom_classes", [])
custom_classes = set(custom_classes) # type: ignore[arg-type]
build_features = data.get("build_features", [])
build_features = set(build_features) # type: ignore[arg-type]
include_all_non_op_selectives = data.get("include_all_non_op_selectives", False)
assert isinstance(include_all_non_op_selectives, bool)
return SelectiveBuilder(
include_all_operators,
debug_info,
operators,
kernel_metadata,
custom_classes, # type: ignore[arg-type]
build_features, # type: ignore[arg-type]
include_all_non_op_selectives,
)
@staticmethod
def from_yaml_str(config_contents: str) -> "SelectiveBuilder":
contents = yaml.safe_load(config_contents)
return SelectiveBuilder.from_yaml_dict(contents)
@staticmethod
def from_yaml_path(config_path: str) -> "SelectiveBuilder":
with open(config_path, "r") as f:
contents = yaml.safe_load(f)
return SelectiveBuilder.from_yaml_dict(contents)
@staticmethod
def from_legacy_op_registration_allow_list(
allow_list: Set[str], is_root_operator: bool, is_used_for_training: bool
) -> "SelectiveBuilder":
operators = {}
for op in allow_list:
operators[op] = {
"name": op,
"is_root_operator": is_root_operator,
"is_used_for_training": is_used_for_training,
"include_all_overloads": True,
}
return SelectiveBuilder.from_yaml_dict(
{
"operators": operators,
"include_all_non_op_selectives": True,
}
)
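# Illustrative usage sketch (hypothetical operator entries, not a real build config):
#
#   sb = SelectiveBuilder.from_yaml_dict({
#       "operators": {
#           "aten::add": {
#               "is_root_operator": True,
#               "is_used_for_training": False,
#               "include_all_overloads": True,
#           }
#       }
#   })
#   sb.is_operator_selected("aten::add.Tensor")       # True, via the base-name fallback
#   sb.is_operator_selected("aten::mul")               # False
#   sb.is_kernel_dtype_selected("add_stub", "Float")   # False (no kernel metadata selected)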
def is_operator_selected(self, name: str) -> bool:
if self.include_all_operators:
return True
if name in self.operators:
return True
name = strip_operator_overload_name(name)
return name in self.operators and self.operators[name].include_all_overloads
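# Note: an overload such as "aten::add.Tensor" also counts as selected when only the base entry
# "aten::add" is listed, provided that entry sets include_all_overloads (the fallback above).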
def is_native_function_selected(self, func: NativeFunction) -> bool:
op_name = op_name_from_native_function(func)
return self.is_operator_selected(op_name)
def is_operator_selected_for_training(self, name: str) -> bool:
if not self.is_operator_selected(name):
return False
if self.include_all_operators:
return True
not_training_op = SelectiveBuildOperator(
name="",
is_root_operator=False,
is_used_for_training=False,
include_all_overloads=False,
_debug_info=None,
)
op = not_training_op
if name in self.operators:
op = self.operators[name]
name = strip_operator_overload_name(name)
base_op = not_training_op
if name in self.operators:
base_op = self.operators[name]
return op.is_used_for_training or (
base_op.include_all_overloads and base_op.is_used_for_training
)
def is_native_function_selected_for_training(self, func: NativeFunction) -> bool:
op_name = op_name_from_native_function(func)
return self.is_operator_selected_for_training(op_name)
def is_root_operator(self, name: str) -> bool:
if not self.is_operator_selected(name):
return False
if self.include_all_operators:
return True
if name in self.operators:
op: SelectiveBuildOperator = self.operators[name]
return op.is_root_operator
name = strip_operator_overload_name(name)
if name not in self.operators:
return False
base_op: SelectiveBuildOperator = self.operators[name]
return base_op.include_all_overloads and base_op.is_root_operator
def is_kernel_dtype_selected(self, kernel_tag: str, dtype: str) -> bool:
if self.include_all_operators or self.include_all_non_op_selectives:
return True
return (
kernel_tag in self.kernel_metadata
and dtype in self.kernel_metadata[kernel_tag]
)
def to_dict(self) -> Dict[str, object]:
ret: Dict[str, object] = {
"include_all_non_op_selectives": self.include_all_non_op_selectives,
"include_all_operators": self.include_all_operators,
}
operators = {}
for (op_name, op) in self.operators.items():
operators[op_name] = op.to_dict()
ret["operators"] = operators
if self._debug_info is not None:
ret["debug_info"] = sorted(self._debug_info)
ret["kernel_metadata"] = {
k: sorted(list(v)) for (k, v) in self.kernel_metadata.items()
}
ret["custom_classes"] = sorted(self.custom_classes)
ret["build_features"] = sorted(self.build_features)
return ret
def merge_kernel_metadata(
lhs: Dict[str, List[str]],
rhs: Dict[str, List[str]],
) -> Dict[str, List[str]]:
kernel_metadata: Dict[str, List[str]] = {}
for (tag_name, dtypes) in list(lhs.items()) + list(rhs.items()):
dtypes_copy = set(dtypes)
if tag_name in kernel_metadata:
dtypes_copy |= set(kernel_metadata[tag_name])
kernel_metadata[tag_name] = list(dtypes_copy)
return kernel_metadata
def combine_selective_builders(
lhs: SelectiveBuilder, rhs: SelectiveBuilder
) -> SelectiveBuilder:
include_all_operators = lhs.include_all_operators or rhs.include_all_operators
debug_info = merge_debug_info(lhs._debug_info, rhs._debug_info)
operators = merge_operator_dicts(lhs.operators, rhs.operators)
kernel_metadata = merge_kernel_metadata(lhs.kernel_metadata, rhs.kernel_metadata)
include_all_non_op_selectives = (
lhs.include_all_non_op_selectives or rhs.include_all_non_op_selectives
)
custom_classes = lhs.custom_classes.union(rhs.custom_classes)
build_features = lhs.build_features.union(rhs.build_features)
return SelectiveBuilder(
include_all_operators,
debug_info,
operators,
kernel_metadata,
custom_classes,
build_features,
include_all_non_op_selectives,
)
def op_name_from_native_function(f: NativeFunction) -> str:
# This was originally read from the 'operator_name_with_overload' field in the
# declaration dict, which was the part before the first '(' in 'schema_string'.
return f"{f.namespace}::{f.func.name}"
| pytorch-master | torchgen/selective_build/selector.py |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
# This class holds information about a single operator used to determine
# the outcome of a selective/custom PyTorch build that doesn't include
# registration code for all the supported operators. This is done to
# reduce the size of the generated binary so that it can be deployed in
# situations where binary size comes at a premium.
#
@dataclass(frozen=True)
class SelectiveBuildOperator:
# The name of the operator. This includes the aten::, etc... prefix
# The operator name may or may not have the overload name. If this
# operator name does not specify an overload name, the way to determine
# if this entry refers to the family of operators with this base name
# or just the operator with this name is to look at the value of the
# 'include_all_overloads' flag in this class.
name: str
# True if this is a root operator (i.e. called directly from a
# TorchScript model, etc...). An operator is considered to be a
# root operator if it is called directly from any one of the models
# that this instance of the pytorch library was built for. Hence, it
# may not be a root operator in all of the models that are used in
# this instance of the pytorch library.
is_root_operator: bool
# Is this operator used for on-device training? If True, then we need to
# use the information to generate code in VariableType_N.cpp for registration
# of training related operators. Again, this is True if this operator
# is used for training in one or more models used by this instance of the
# pytorch library.
is_used_for_training: bool
# If True, it indicates that this operator instance (object) refers to an
# operator without the overload name and should apply to all overloads
# which have this operator name as the base name. This flag is applicable
# only for objects that have operator names without a DOT (period) character
# in them.
#
# Note: This flag is a temporary workaround to grandfather in the current
# static selective (custom) build mechanism, which largely ignores overload
# names when determining whether to select operators for registration
# purposes.
include_all_overloads: bool
# Debug Information at the operator level
_debug_info: Optional[Tuple[str, ...]]
@staticmethod
def from_yaml_dict(
op_name: str, op_info: Dict[str, object]
) -> "SelectiveBuildOperator":
allowed_keys = {
"name",
"is_root_operator",
"is_used_for_training",
"include_all_overloads",
"debug_info",
}
if len(set(op_info.keys()) - allowed_keys) > 0:
raise Exception(
"Got unexpected top level keys: {}".format(
",".join(set(op_info.keys()) - allowed_keys),
)
)
if "name" in op_info:
assert op_name == op_info["name"]
is_root_operator = op_info.get("is_root_operator", True)
assert isinstance(is_root_operator, bool)
is_used_for_training = op_info.get("is_used_for_training", True)
assert isinstance(is_used_for_training, bool)
include_all_overloads = op_info.get("include_all_overloads", True)
assert isinstance(include_all_overloads, bool)
debug_info: Optional[Tuple[str, ...]] = None
if "debug_info" in op_info:
di_list = op_info["debug_info"]
assert isinstance(di_list, list)
debug_info = tuple(map(lambda x: str(x), di_list))
return SelectiveBuildOperator(
name=op_name,
is_root_operator=is_root_operator,
is_used_for_training=is_used_for_training,
include_all_overloads=include_all_overloads,
_debug_info=debug_info,
)
@staticmethod
def from_legacy_operator_name_without_overload(
name: str,
) -> "SelectiveBuildOperator":
return SelectiveBuildOperator(
name=name,
is_root_operator=True,
is_used_for_training=True,
include_all_overloads=True,
_debug_info=None,
)
def to_dict(self) -> Dict[str, object]:
ret: Dict[str, object] = {
"is_root_operator": self.is_root_operator,
"is_used_for_training": self.is_used_for_training,
"include_all_overloads": self.include_all_overloads,
}
if self._debug_info is not None:
ret["debug_info"] = self._debug_info
return ret
def merge_debug_info(
lhs: Optional[Tuple[str, ...]],
rhs: Optional[Tuple[str, ...]],
) -> Optional[Tuple[str, ...]]:
# Ensure that when merging, each entry shows up just once.
if lhs is None and rhs is None:
return None
return tuple(set((lhs or ()) + (rhs or ())))
def combine_operators(
lhs: "SelectiveBuildOperator", rhs: "SelectiveBuildOperator"
) -> "SelectiveBuildOperator":
if str(lhs.name) != str(rhs.name):
raise Exception(
"Expected both arguments to have the same name, but got '{}' and '{}' instead".format(
str(lhs.name),
str(rhs.name),
)
)
return SelectiveBuildOperator(
name=lhs.name,
# Consider this operator to be a root operator if it is a
# root operator in any of the models used in this instance of
# the pytorch library.
is_root_operator=lhs.is_root_operator or rhs.is_root_operator,
# Consider this operator to be a training operator if it is
# an operator used for training in any of the models used
# in this instance of the pytorch library.
is_used_for_training=lhs.is_used_for_training or rhs.is_used_for_training,
include_all_overloads=lhs.include_all_overloads or rhs.include_all_overloads,
_debug_info=merge_debug_info(lhs._debug_info, rhs._debug_info),
)
def merge_operator_dicts(
lhs: Dict[str, SelectiveBuildOperator],
rhs: Dict[str, SelectiveBuildOperator],
) -> Dict[str, SelectiveBuildOperator]:
operators: Dict[str, SelectiveBuildOperator] = {}
for (op_name, op) in list(lhs.items()) + list(rhs.items()):
new_op = op
if op_name in operators:
new_op = combine_operators(operators[op_name], op)
operators[op_name] = new_op
return operators
def strip_operator_overload_name(op_name: str) -> str:
return op_name.split(".")[0]
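# Example: strip_operator_overload_name("aten::add.Tensor") == "aten::add"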
| pytorch-master | torchgen/selective_build/operator.py |
pytorch-master | torchgen/selective_build/__init__.py |
|
from typing import Dict, Union
from torchgen.model import NativeFunctionsGroup, NativeFunctionsViewGroup
def func_name_base_str(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> str:
if isinstance(g, NativeFunctionsGroup):
return str(g.functional.func.name.name.base)
else:
return str(g.view.root_name)
is_hand_written_ops_ = frozenset(
(
"abs",
"add",
"addmm",
"all",
"any",
"argmin",
"bmm",
"clamp",
"clamp_min",
"cumsum",
"div",
"fmod",
"index_select",
"leaky_relu",
"linear",
"log",
"matmul",
"mul",
"narrow_copy",
"nonzero",
"pow",
"remainder",
"sigmoid",
"sign",
"sub",
"tanh",
"detach",
"expand_as",
"flatten",
"narrow",
"reshape_as",
"select",
"slice",
"softmax",
"split",
"squeeze",
"transpose",
"view",
"where",
)
)
def is_hand_written(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
name_base = func_name_base_str(g)
return name_base in is_hand_written_ops_
def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> None:
assert index == 0 or index == 1
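# `index` selects which of the two generated test cases is being populated: 0 corresponds to the
# small default shapes and 1 to the larger ones (see test_value_expression in the static runtime
# generator module, which picks tensor sizes per index).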
if op_name == "addr":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["vec1"] = "at::rand({6})"
arg_map["vec2"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["vec1"] = "at::rand({22})"
arg_map["vec2"] = "at::rand({22})"
return
if op_name == "mv":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["vec"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["vec"] = "at::rand({22})"
return
if op_name == "addbmm":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
else:
arg_map["self"] = "at::rand({22, 22})"
return
if op_name == "cross":
if index == 0:
arg_map["self"] = "at::rand({3, 3, 3})"
arg_map["other"] = "at::rand({3, 3, 3})"
else:
arg_map["self"] = "at::rand({22, 3, 22})"
arg_map["other"] = "at::rand({22, 3, 22})"
return
if op_name == "take":
if index == 0:
arg_map["index"] = "at::randint(0, 216, {20}, torch::kInt64)"
else:
arg_map["index"] = "at::randint(0, 1000, {100}, torch::kInt64)"
return
if op_name == "take_along_dim":
if index == 0:
arg_map["indices"] = "at::argsort(self0, 1, true)"
else:
arg_map["indices"] = "at::argsort(self1, 1, true)"
return
if op_name == "masked_select":
if index == 0:
arg_map["mask"] = "at::randn({6, 6, 6}) > 0.5"
else:
arg_map["mask"] = "at::rand({22, 22, 22}) > 0.5"
return
if op_name == "orgqr":
if index == 0:
arg_map["input2"] = "at::rand({6, 6})"
else:
arg_map["input2"] = "at::rand({22, 22})"
return
if op_name == "ormqr":
if index == 0:
arg_map["input2"] = "at::rand({6, 6})"
else:
arg_map["input2"] = "at::rand({22, 22})"
return
if op_name == "quantile":
if index == 0:
arg_map["q"] = "at::rand({6})"
arg_map["interpolation"] = '"linear"'
else:
arg_map["q"] = "at::rand({22})"
arg_map["interpolation"] = '"linear"'
return
if op_name == "nanquantile":
if index == 0:
arg_map["q"] = "at::rand({6})"
arg_map["interpolation"] = '"linear"'
else:
arg_map["q"] = "at::rand({22})"
arg_map["interpolation"] = '"linear"'
return
if op_name == "multi_margin_loss":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
arg_map["weight"] = "at::rand({22})"
return
if op_name == "multilabel_margin_loss":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["target"] = "at::randint(6, {6, 6}, torch::kInt64)"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["target"] = "at::randint(22, {22, 22}, torch::kInt64)"
return
if op_name == "nll_loss":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
arg_map["weight"] = "at::rand({22})"
return
if op_name == "nll_loss2d":
if index == 0:
arg_map["self"] = "at::rand({6, 6, 6, 6})"
arg_map["target"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22, 22, 22})"
arg_map["target"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
arg_map["weight"] = "at::rand({22})"
return
if op_name in (
"fft_fft",
"fft_ifft",
"fft_rfft",
"fft_irfft",
"fft_hfft",
"fft_ihfft",
):
arg_map["norm"] = '"forward"'
return
if op_name == "linalg_tensorinv":
if index == 0:
arg_map["self"] = "at::rand({6, 6, 6, 6})"
arg_map["ind"] = "2"
else:
arg_map["self"] = "at::rand({22, 22, 22, 22})"
arg_map["ind"] = "2"
return
if op_name == "addmv":
if index == 0:
arg_map["self"] = "at::rand({2})"
arg_map["mat"] = "at::rand({2, 2})"
arg_map["vec"] = "at::rand({2})"
else:
arg_map["self"] = "at::rand({35})"
arg_map["mat"] = "at::rand({35, 35})"
arg_map["vec"] = "at::rand({35})"
return
if op_name == "acosh":
if index == 0:
arg_map["self"] = "at::rand({2, 2, 2}) + at::ones({2, 2, 2})"
else:
arg_map["self"] = "at::rand({5, 5, 5}) + at::ones({5, 5, 5})"
return
if op_name == "adaptive_max_pool2d_backward":
if index == 0:
arg_map["grad_output"] = "at::randint(-3, 2, {2,2,2})"
arg_map["self"] = "at::randint(-3, 2, {2,2,2})"
arg_map["indices"] = "at::randint(0, 1, {2,2,2}, at::kLong)"
else:
arg_map["grad_output"] = "at::randint(-3, 3, {3,3,3})"
arg_map["self"] = "at::randint(-3, 2, {3,3,3})"
arg_map["indices"] = "at::randint(0, 1, {3,3,3}, at::kLong)"
return
if op_name == "adaptive_max_pool3d_backward":
if index == 0:
arg_map["grad_output"] = "at::randint(-3, 2, {2,2,2,2})"
arg_map["self"] = "at::randint(-3, 2, {2,2,2,2})"
arg_map["indices"] = "at::randint(0, 1, {2,2,2,2}, at::kLong)"
else:
arg_map["grad_output"] = "at::randint(-3, 3, {3,3,3,3})"
arg_map["self"] = "at::randint(-3, 2, {3,3,3,3})"
arg_map["indices"] = "at::randint(0, 1, {3,3,3,3}, at::kLong)"
return
if op_name == "gather":
if index == 0:
arg_map["self"] = "at::randint(1, 100, {2,2,2}, at::kInt)"
arg_map["dim"] = "1"
arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
arg_map["sparse_grad"] = "false"
else:
arg_map["self"] = "at::randint(1, 100, {5,5,5}, at::kInt)"
arg_map["dim"] = "1"
arg_map["index"] = "at::randint(0, 4, {5,5,5}, torch::kInt64)"
arg_map["sparse_grad"] = "false"
return
if op_name == "gelu":
if index == 0:
arg_map["self"] = "at::rand({6, 6, 6})"
arg_map["approximate"] = '"tanh"'
else:
arg_map["self"] = "at::rand({22, 22, 22})"
arg_map["approximate"] = '"tanh"'
return
if op_name == "gelu_backward":
if index == 0:
arg_map["grad_output"] = "at::rand({6, 6, 6})"
arg_map["self"] = "at::rand({6, 6, 6})"
arg_map["approximate"] = '"tanh"'
else:
arg_map["grad_output"] = "at::rand({22, 22, 22})"
arg_map["self"] = "at::rand({22, 22, 22})"
arg_map["approximate"] = '"tanh"'
return
if op_name == "index_add":
if index == 0:
arg_map["self"] = "at::rand({2})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 1, {2}, at::kInt)"
arg_map["source"] = "at::rand({2})"
arg_map["alpha"] = "2"
else:
arg_map["self"] = "at::rand({16})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 10, {16}, at::kInt)"
arg_map["source"] = "at::rand({16})"
arg_map["alpha"] = "2"
return
if op_name == "index_copy":
if index == 0:
arg_map["self"] = "at::rand({2})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 1, {2}, at::kLong)"
arg_map["source"] = "at::rand({2})"
else:
arg_map["self"] = "at::rand({32})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 10, {32}, at::kLong)"
arg_map["source"] = "at::rand({32})"
return
if op_name == "linalg_cross":
if index == 0:
arg_map["self"] = "at::rand({6, 3, 6})"
arg_map["other"] = "at::rand({6, 3, 6})"
arg_map["dim"] = "1"
else:
arg_map["self"] = "at::rand({22, 3, 22})"
arg_map["other"] = "at::rand({22, 3, 22})"
arg_map["dim"] = "1"
return
if op_name == "nll_loss_backward":
if index == 0:
arg_map["grad_output"] = "at::rand({})"
arg_map["self"] = "at::rand({6})"
arg_map["target"] = "at::randint(0, 5, {6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
arg_map["reduction"] = "1"
arg_map["ignore_index"] = "1"
arg_map["total_weight"] = "at::rand({})"
else:
arg_map["grad_output"] = "at::rand({})"
arg_map["self"] = "at::rand({36})"
arg_map["target"] = "at::randint(0, 11, {36}, torch::kInt64)"
arg_map["weight"] = "at::rand({36})"
arg_map["reduction"] = "1"
arg_map["ignore_index"] = "1"
arg_map["total_weight"] = "at::rand({})"
return
if op_name in ["scatter", "scatter_add", "_scatter_reduce"]:
if index == 0:
arg_map["self"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
arg_map["src"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
else:
arg_map["self"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
arg_map["index"] = "at::randint(0, 1, {5,5,5}, torch::kInt64)"
arg_map["src"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
if "reduce" in arg_map:
arg_map["reduce"] = '"sum"' if op_name == "_scatter_reduce" else '"add"'
return
if op_name == "scatter_reduce":
arg_map["reduce"] = '"mean"'
if index == 0:
arg_map["index"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
else:
arg_map["index"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
return
if op_name == "special_zeta":
if index == 0:
arg_map["self"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
arg_map["other"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
else:
arg_map["self"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
arg_map["other"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
return
if op_name == "_convert_indices_from_csr_to_coo":
if index == 0:
arg_map["crow_indices"] = "torch::tensor({1}, torch::kInt32)"
arg_map["col_indices"] = "torch::tensor({0, 1, 0}, torch::kInt32)"
arg_map["out_int32"] = "false"
else:
arg_map["crow_indices"] = "torch::tensor({0}, torch::kInt32)"
arg_map[
"col_indices"
] = "torch::tensor({0, 1, 0, 2, 1, 2, 0, 1, 0, 2, 1, 2}, torch::kInt32)"
arg_map["out_int32"] = "false"
return
if op_name == "_convert_indices_from_coo_to_csr":
if index == 0:
arg_map["self"] = "at::randint(0, 3, {2}, at::kInt)"
arg_map["size"] = "10"
arg_map["out_int32"] = "false"
else:
arg_map["self"] = "at::randint(0, 3, {12}, at::kInt)"
arg_map["size"] = "24"
arg_map["out_int32"] = "false"
return
if op_name in ("diagonal", "linalg_diagonal"):
arg_map["offset"] = "0"
arg_map["dim0"] = "1"
arg_map["dim1"] = "2"
return
| pytorch-master | torchgen/static_runtime/config.py |
pytorch-master | torchgen/static_runtime/__init__.py |
|
import json
import logging
import math
from typing import Dict, List, Optional, Sequence, Tuple, Union
import torchgen.api.cpp as cpp
from torchgen.context import native_function_manager
from torchgen.model import (
Argument,
BackendIndex,
BaseTy,
BaseType,
FunctionSchema,
NativeFunctionsGroup,
NativeFunctionsViewGroup,
OptionalType,
SelfArgument,
TensorOptionsArguments,
Type,
)
from torchgen.static_runtime import config
logger: logging.Logger = logging.getLogger()
def has_alias(
arguments: Sequence[Union[Argument, SelfArgument, TensorOptionsArguments]]
) -> bool:
for arg in arguments:
annotation = getattr(arg, "annotation", None)
if not annotation:
continue
alias_set = getattr(annotation, "alias_set", ())
if alias_set:
return True
return False
BLOCKED_OPS = frozenset(
(
# non cpu ops
"sparse_sampled_addmm",
"hspmm",
# sparse ops
"sspaddmm",
"coalesce",
"_indices",
"indices",
"_values",
"values",
"crow_indices",
"col_indices",
# deprecated ops
"floor_divide",
"ger",
# buggy ops
"conj_physical", # P495807361
"binary_cross_entropy", # P496394764
"arccosh",
# uncommon ops
"cholesky",
"lu_solve",
"linalg_cholesky",
"linalg_householder_product",
"linalg_ldl_solve",
"_compute_linear_combination",
# training related ops
"_make_dual",
# cannot call directly
"_fw_primal",
# no documentation
"_index_reduce",
)
)
def is_supported(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
base_op_name = ""
func = None
if isinstance(g, NativeFunctionsViewGroup):
base_op_name = g.view.root_name
func = g.view.func
else:
base_op_name = g.out.func.name.name.base
func = g.out.func
if config.is_hand_written(g):
logger.info(f"HAND WRITTEN: {base_op_name}")
return False
if base_op_name in BLOCKED_OPS:
logger.info(f"BLOCKED: {base_op_name}")
return False
for arg in func.schema_order_arguments():
maybe_method = ivalue_type_conversion_method(arg.type)
if not maybe_method:
# This type conversion is not supported yet.
logger.info(f"NOT SUPPORTED TYPE CONVERTING: {str(func)}")
return False
if isinstance(g, NativeFunctionsViewGroup):
if "at::Tensor" != cpp.returns_type(func.returns).cpp_type():
# Returns a non-Tensor value.
logger.info(f"NON-TENSOR RET TYPE: {str(func)}")
return False
return True
# For out variant ops, we need to check the arguments of its functional func.
for arg in g.functional.func.schema_order_arguments():
maybe_method = ivalue_type_conversion_method(arg.type)
if not maybe_method:
# This type conversion is not supported yet.
logger.info(f"NOT SUPPORTED TYPE CONVERTING: {str(g.functional.func)}")
return False
if not g.structured:
# For an unstructured op, we check whether it has an out-variant implementation.
# The minimum requirement for the out variant is that its output tensor is the last
# parameter.
if (
not hasattr(g, "out")
or not str(func).endswith("Tensor(a!) out) -> Tensor(a!)")
or not str(func.name).endswith(".out")
):
return False
if "at::Tensor &" != cpp.returns_type(func.returns).cpp_type():
logger.info(f"NON_TENSOR RET TYPE: {str(func)}")
return False
if has_alias(func.arguments.non_out):
# This op may create an alias of inputs.
logger.info(f"INPUTS ALIAS: {base_op_name}")
return False
return True
def ivalue_type_conversion_method(
arg_type: Union[BaseType, OptionalType, Type]
) -> Optional[Tuple[bool, str]]:
"""
Return the method call expression on a `c10::IValue` that converts its contained value to
the expected `arg_type`. For example, for `arg_type` == BaseTy.Tensor,
this function returns ".toTensor()", so that it can be appended to the ivalue's
variable name to get the value of the expected type.
"""
type_conversion_methods = {
BaseTy.Tensor: ((True, "toTensor()"), (False, "toOptional<at::Tensor>()")),
BaseTy.int: ((False, "toInt()"), (False, "toOptional<int64_t>()")),
BaseTy.bool: ((False, "toBool()"), (False, "toOptional<bool>()")),
BaseTy.Scalar: ((False, "toScalar()"), (False, "toOptional<at::Scalar>()")),
BaseTy.ScalarType: (
(False, "toScalarType()"),
(False, "toOptional<at::ScalarType>()"),
),
BaseTy.str: (
(False, "toStringView()"),
(False, "toOptional<c10::string_view>()"),
),
}
base_ty_object = None
if isinstance(arg_type, BaseType):
base_ty_object = arg_type.name
elif isinstance(arg_type, OptionalType):
if not isinstance(arg_type.elem, BaseType):
# ListType is currently unsupported.
return None
base_ty_object = arg_type.elem.name
else:
return None
if base_ty_object not in type_conversion_methods:
return None
methods = type_conversion_methods[base_ty_object]
if isinstance(arg_type, BaseType):
return methods[0]
return methods[1]
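# Example: a plain Tensor argument yields (True, "toTensor()"), while an optional Tensor
# (OptionalType wrapping BaseTy.Tensor) yields (False, "toOptional<at::Tensor>()").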
should_use_int_tensor_ops_ = frozenset(
(
"bitwise_not",
"bitwise_and",
"bitwise_or",
"bitwise_xor",
"bitwise_left_shift",
"bitwise_right_shift",
"gcd",
"lcm",
"scatter",
"gather",
"_convert_indices_from_coo_to_csr",
"_convert_indices_from_csr_to_coo",
)
)
should_use_complex_tensor_ops_ = frozenset(("view_as_real", "imag", "_conj"))
def should_use_int_tensor(op_name: str) -> bool:
return op_name in should_use_int_tensor_ops_
def should_use_complex_tensor(op_name: str) -> bool:
return op_name in should_use_complex_tensor_ops_
test_tensor_dim_ops_1_ = frozenset(
(
"addmv",
"index_add",
"_convert_indices_from_coo_to_csr",
"_convert_indices_from_csr_to_coo",
"nll_loss_backward",
"dot",
"vdot",
"outer",
"ger",
)
)
test_tensor_dim_ops_2_ = frozenset(
("addmm", "mm", "nuclear_norm", "diag", "_addmm_activation", "matrix_H", "t")
)
def test_tensor_dim(op_name: str) -> int:
if op_name in test_tensor_dim_ops_1_:
return 1
if op_name in test_tensor_dim_ops_2_:
return 2
return 3
test_tensor_shapes_string = '{"view_as_complex": "{2, 2}"}'
test_tensor_shape_json: Dict[str, str] = json.loads(test_tensor_shapes_string)
def test_tensor_shape(op_name: str) -> str:
if op_name in test_tensor_shape_json:
return test_tensor_shape_json[op_name]
else:
return ""
def test_value_expression(
arg_type: Union[BaseType, OptionalType, Type], index: int, op_name: str
) -> str:
tensor_size_ex = test_tensor_shape(op_name)
if tensor_size_ex == "":
num_tensors = 16 if index == 0 else 64
num_dim = test_tensor_dim(op_name)
size_per_dim = math.ceil(num_tensors / float(num_dim))
size_per_dim += size_per_dim % 2
tensor_size_ex = "{%s}" % (",".join([f"{size_per_dim}"] * num_dim))
if should_use_int_tensor(op_name):
tensor_expression = f"at::randint(1, 100, {tensor_size_ex}, at::kInt)"
elif should_use_complex_tensor(op_name):
tensor_expression = f"at::randn({tensor_size_ex}, at::kComplexFloat)"
else:
tensor_expression = f"at::rand({tensor_size_ex})"
value_expressions = {
BaseTy.Tensor: tensor_expression,
BaseTy.int: "1",
BaseTy.bool: "false",
BaseTy.Scalar: "2",
BaseTy.ScalarType: "at::ScalarType::Float",
BaseTy.str: '"floor"',
}
base_ty_object = None
if isinstance(arg_type, BaseType):
base_ty_object = arg_type.name
else:
assert isinstance(arg_type, OptionalType) and isinstance(
arg_type.elem, BaseType
)
base_ty_object = arg_type.elem.name
assert base_ty_object in value_expressions, "not expected type"
value_expression = value_expressions[base_ty_object]
return value_expression
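# Illustrative sketch, not part of the upstream file: for an op name that is in
# neither test_tensor_dim set (the op name "add" here is only for demonstration),
# the first Tensor argument gets a 3-d test tensor expression. The helper is never called.
def _example_test_value_expression() -> str:
    # num_tensors=16, num_dim=3 -> size_per_dim=6, so this returns
    # "at::rand({6,6,6})".
    return test_value_expression(BaseType(BaseTy.Tensor), 0, "add")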
def generate_test_value_definitions(schema: FunctionSchema, index: int) -> str:
assert not schema.is_out_fn()
schema_name = schema.name.name.base
arg_map = {}
for arg in schema.schema_order_arguments():
test_value_exp = test_value_expression(arg.type, index, schema_name)
arg_map[arg.name] = test_value_exp
config.override_test_values(arg_map, schema_name, index)
arg_populations = []
for arg_name, arg_value in arg_map.items():
arg_populations.append(f"auto {arg_name}{index} = {arg_value}")
return ";\n ".join(arg_populations) + ";"
def generate_test_value_names(schema: FunctionSchema, index: int) -> str:
assert not schema.is_out_fn()
return ",".join(f"{arg.name}{index}" for arg in schema.schema_order_arguments())
generate_test_ir_arguments_base_ty_to_type_str_ = {
BaseTy.Tensor: "Tensor",
BaseTy.int: "int",
BaseTy.float: "float",
BaseTy.str: "str",
BaseTy.Scalar: "int",
BaseTy.ScalarType: "int",
BaseTy.bool: "bool",
}
def generate_test_ir_arguments(
schema: FunctionSchema,
) -> List[Tuple[str, Optional[str]]]:
def ir_argument(arg: Argument) -> Tuple[str, Optional[str]]:
t = arg.type
add_optional = False
if isinstance(t, OptionalType):
t = t.elem
add_optional = True
assert isinstance(t, BaseType)
type_str = None
if t.name in generate_test_ir_arguments_base_ty_to_type_str_:
type_str = generate_test_ir_arguments_base_ty_to_type_str_[t.name]
if type_str and add_optional:
type_str = f"{type_str}?"
return ("%" + arg.name, type_str)
return [ir_argument(arg) for arg in schema.schema_order_arguments()]
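# Illustrative sketch, not part of the upstream file: IR argument declarations
# produced for a hand-written schema string. FunctionSchema.parse is assumed to
# behave as it does elsewhere in torchgen; the expected output is shown as a
# comment rather than asserted, and the helper is never called.
def _example_test_ir_arguments() -> List[Tuple[str, Optional[str]]]:
    schema = FunctionSchema.parse(
        "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
    )
    # Expected: [("%self", "Tensor"), ("%other", "Tensor"), ("%alpha", "int")]
    return generate_test_ir_arguments(schema)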
def generate_arg_extraction(schema: FunctionSchema) -> str:
arg_populations = []
for i, arg in enumerate(schema.schema_order_arguments()):
maybe_method = ivalue_type_conversion_method(arg.type)
assert maybe_method
is_reference, type_conversion_method = maybe_method
reference = "&" if is_reference else ""
arg_populations.append(
f"const auto{reference} {arg.name} = p_node->Input({i}).{type_conversion_method}"
)
return ";\n ".join(arg_populations) + ";"
def get_kernel_name(g: NativeFunctionsGroup, backend_index: BackendIndex) -> str:
kernel = backend_index.get_kernel(g.functional)
if g.structured or kernel is None:
return cpp.name(g.functional.func)
return kernel.kernel
def get_out_kernel_name(g: NativeFunctionsGroup, backend_index: BackendIndex) -> str:
kernel = backend_index.get_kernel(g.out)
if g.structured or kernel is None:
return cpp.name(g.out.func)
return kernel.kernel
def generate_non_out_variant_call(
g: NativeFunctionsGroup, backend_index: BackendIndex
) -> str:
schema = g.functional.func
assert not schema.is_out_fn()
kernel_name = get_kernel_name(g, backend_index)
arg_names = (arg.name for arg in schema.schema_order_arguments())
namespace_name = "cpu" if g.structured else "native"
return f'at::{namespace_name}::{kernel_name}({",".join(arg_names)})'
def generate_call_to_view_ops(
g: NativeFunctionsViewGroup, backend_index: BackendIndex
) -> str:
schema = g.view.func
kernel_name = cpp.name(schema)
kernel = backend_index.get_kernel(g.view)
if kernel:
kernel_name = kernel.kernel
arg_names = (arg.name for arg in schema.schema_order_arguments())
namespace_name = "native"
return f'at::{namespace_name}::{kernel_name}({",".join(arg_names)})'
def generate_out_variant_call(
g: NativeFunctionsGroup, backend_index: BackendIndex
) -> str:
schema = g.out.func
assert schema.is_out_fn()
arg_names = []
kernel_name = get_out_kernel_name(g, backend_index)
if g.structured:
# structured op starts with the output tensor argument.
arg_names = [out_arg.name for out_arg in schema.arguments.out]
else:
arg_names = []
for arg in schema.arguments.non_out:
if isinstance(arg, SelfArgument):
arg_names.append(arg.argument.name)
else:
assert isinstance(arg, Argument)
arg_names.append(arg.name)
if not g.structured:
assert len(schema.arguments.out) == 1
arg_names.append(schema.arguments.out[0].name)
cpp_arg_names = ",".join(arg_names)
namespace_name = "cpu" if g.structured else "native"
return f"at::{namespace_name}::{kernel_name}({cpp_arg_names})"
no_memory_resize_ops = frozenset(
(
"isin.Scalar_Tensor",
"index_add",
"dot",
"vdot",
"nuclear_norm",
"histc",
"l1_loss",
"multi_margin_loss",
"multilabel_margin_loss",
"nll_loss",
"nll_loss2d",
)
)
def should_check_resize(schema: FunctionSchema) -> bool:
schema_str = str(schema)
type_variant_op_name = schema_str[: schema_str.find("(")]
return type_variant_op_name not in no_memory_resize_ops
def op_name_from_group(g: NativeFunctionsGroup) -> str:
return g.functional.func.name.name.base
class GenOpDispatcher:
def out_variant(
self, groups: Sequence[NativeFunctionsGroup], backend_index: BackendIndex
) -> str:
if not groups:
return ""
generated_type_variants = []
for g in groups:
with native_function_manager(g):
assert is_supported(g)
assert isinstance(g, NativeFunctionsGroup)
generated_type_variant = self.out_variant_op_generator(g, backend_index)
generated_type_variants.append(generated_type_variant)
op_name = op_name_from_group(groups[0])
body = "\n".join(generated_type_variants)
generated = f"""
REGISTER_OPERATOR_FUNCTOR(
aten::{op_name},
aten_{op_name},
[](Node* n) -> SROperator {{
{body}
LogAndDumpSchema(n);
return nullptr;
}});
"""
return generated
def view(
self, groups: Sequence[NativeFunctionsViewGroup], backend_index: BackendIndex
) -> str:
if not groups:
return ""
generated_type_variants = []
for g in groups:
with native_function_manager(g):
assert is_supported(g)
assert isinstance(g, NativeFunctionsViewGroup)
generated_type_variant = self.view_op_generator(g, backend_index)
generated_type_variants.append(generated_type_variant)
op_name = config.func_name_base_str(groups[0])
body = "\n".join(generated_type_variants)
generated = f"""
REGISTER_NATIVE_OPERATOR_FUNCTOR(
aten::{op_name},
aten_{op_name},
[](Node* n) -> SROperator {{
{body}
LogAndDumpSchema(n);
return nullptr;
}});
"""
return generated
def out_variant_op_generator(
self, g: NativeFunctionsGroup, backend_index: BackendIndex
) -> str:
functional = g.functional
schema = str(functional.func)
populated_argument = generate_arg_extraction(g.functional.func)
functional_variant_call = generate_non_out_variant_call(g, backend_index)
assert len(g.out.func.arguments.out) == 1
out_variable_name = str(g.out.func.arguments.out[0].name)
out_variant_call = generate_out_variant_call(g, backend_index)
generated = f"""
if (n->matches(torch::schema("aten::{schema}"))) {{
return [](ProcessedNode* p_node) {{
{populated_argument}
if (p_node->Output(0).isNone()) {{
p_node->Output(0) = {functional_variant_call};
return;
}}
auto& {out_variable_name} = p_node->Output(0).toTensor();
fastResizeToZero({out_variable_name});
{out_variant_call};
}};
}}"""
return generated
def view_op_generator(
self, g: NativeFunctionsViewGroup, backend_index: BackendIndex
) -> str:
schema = str(g.view.func)
populated_argument = generate_arg_extraction(g.view.func)
functional_variant_call = generate_call_to_view_ops(g, backend_index)
generated = f"""
if (n->matches(torch::schema("aten::{schema}"))) {{
return [](ProcessedNode* p_node) {{
{populated_argument}
p_node->Output(0) = {functional_variant_call};
}};
}}"""
return generated
class GenOpTestCase:
def out_variant(self, groups: Sequence[NativeFunctionsGroup]) -> str:
if not groups:
return ""
generated_type_variants = []
for g in groups:
with native_function_manager(g):
assert is_supported(g)
assert isinstance(g, NativeFunctionsGroup)
generated_type_variant = self.out_variant_op_test_case_generator(g)
generated_type_variants.append(generated_type_variant)
return "\n".join(generated_type_variants)
def view(self, groups: Sequence[NativeFunctionsViewGroup]) -> str:
if not groups:
return ""
generated_type_variants = []
for g in groups:
with native_function_manager(g):
assert is_supported(g)
assert isinstance(g, NativeFunctionsViewGroup)
generated_type_variant = self.view_op_test_case_generator(g)
generated_type_variants.append(generated_type_variant)
return "\n".join(generated_type_variants)
def out_variant_op_test_case_generator(self, g: NativeFunctionsGroup) -> str:
schema = g.functional.func
schema_str = str(schema)
assert schema_str.find("(") > 0
type_variant_op_name = schema_str[: schema_str.find("(")].replace(".", "_")
op_name = op_name_from_group(g)
assert type_variant_op_name.startswith(op_name)
arg_types = generate_test_ir_arguments(schema)
arg_declarations = ", ".join(
(
arg_name if arg_type is None else f"{arg_name}: {arg_type}"
for arg_name, arg_type in arg_types
)
)
arg_names = ", ".join((arg_name for arg_name, _ in arg_types))
assert (
len(schema.returns) == 1
and isinstance(schema.returns[0].type, BaseType)
and schema.returns[0].type.name is BaseTy.Tensor
)
test_value_definitions = generate_test_value_definitions(schema, 0)
test_value_names = generate_test_value_names(schema, 0)
test_value_definitions2 = generate_test_value_definitions(schema, 1)
test_value_names2 = generate_test_value_names(schema, 1)
check_resize = "true" if should_check_resize(schema) else "false"
generated = f"""
TEST(StaticRuntime, autogen_{type_variant_op_name}) {{
const std::string script = R"IR(
graph({arg_declarations}):
%bias: None = prim::Constant()
%ret = aten::{op_name}({arg_names})
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
{test_value_definitions}
std::vector<IValue> args{{{test_value_names}}};
testStaticRuntime(script, args, {{}}, /*use_allclose=*/false, /*use_equalnan=*/false, /*check_resize=*/{check_resize});
{test_value_definitions2}
std::vector<IValue> args2{{{test_value_names2}}};
testStaticRuntime(script, args, args2, /*use_allclose=*/false, /*use_equalnan=*/false, /*check_resize=*/{check_resize});
}}
"""
return generated
def view_op_test_case_generator(self, g: NativeFunctionsViewGroup) -> str:
schema = g.view.func
schema_str = str(schema)
assert schema_str.find("(") > 0
type_variant_op_name = schema_str[: schema_str.find("(")].replace(".", "_")
op_name = g.view.root_name
assert type_variant_op_name.startswith(op_name)
arg_types = generate_test_ir_arguments(schema)
arg_declarations = ", ".join(
(
arg_name if arg_type is None else f"{arg_name}: {arg_type}"
for arg_name, arg_type in arg_types
)
)
arg_names = ", ".join((arg_name for arg_name, _ in arg_types))
assert (
len(schema.returns) == 1
and isinstance(schema.returns[0].type, BaseType)
and schema.returns[0].type.name is BaseTy.Tensor
)
test_value_definitions = generate_test_value_definitions(schema, 0)
test_value_names = generate_test_value_names(schema, 0)
generated = f"""
TEST(StaticRuntime, autogen_{type_variant_op_name}) {{
const std::string script = R"IR(
graph({arg_declarations}):
%bias: None = prim::Constant()
%ret = aten::{op_name}({arg_names})
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
{test_value_definitions}
std::vector<IValue> args{{{test_value_names}}};
testStaticRuntime(script, args);
}}
"""
return generated
| pytorch-master | torchgen/static_runtime/generator.py |
import argparse
import itertools
import os
from typing import Sequence, TypeVar, Union
from libfb.py.log import set_simple_logging # type: ignore[import]
from torchgen import gen
from torchgen.context import native_function_manager
from torchgen.model import DispatchKey, NativeFunctionsGroup, NativeFunctionsViewGroup
from torchgen.static_runtime import config, generator
# Given a list of `grouped_native_functions` sorted by their op names, return a list of
# lists each of which groups ops that share the base name. For example, `mean` and
# `mean.dim` are grouped together by this function.
NativeGroupT = TypeVar(
"NativeGroupT",
bound=Union[NativeFunctionsGroup, NativeFunctionsViewGroup],
)
def group_functions_by_op_name(
grouped_native_functions: Sequence[NativeGroupT],
) -> Sequence[Sequence[NativeGroupT]]:
if not grouped_native_functions:
return []
groups = []
def is_supported(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> bool:
with native_function_manager(g):
return generator.is_supported(g)
eligible_ops = (g for g in grouped_native_functions if is_supported(g))
groups = [
list(group)
for k, group in (
itertools.groupby(
eligible_ops,
key=lambda g: config.func_name_base_str(g),
)
)
]
return groups
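# Illustrative sketch, not part of the upstream file: the same grouping idea
# applied to plain (already sorted) op-name strings, so the behaviour of
# group_functions_by_op_name() is visible without building real
# NativeFunctionsGroup objects. The names below are hypothetical and the helper
# is never called.
def _example_group_by_base_name(sorted_op_names: Sequence[str]) -> Sequence[Sequence[str]]:
    return [
        list(group)
        for _, group in itertools.groupby(
            sorted_op_names, key=lambda name: name.split(".")[0]
        )
    ]
# e.g. _example_group_by_base_name(["mean", "mean.dim", "mm", "mm.out"])
#      -> [["mean", "mean.dim"], ["mm", "mm.out"]]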
def clang_format(cpp_file_path: str) -> None:
import subprocess
subprocess.run(["clang-format", "-i", cpp_file_path])
def write_cpp(cpp_ops: Sequence[str], file_path: str) -> None:
code = "\n".join(cpp_ops)
generated = f"""// @lint-ignore-every CLANGTIDY HOWTOEVEN
#include <torch/csrc/jit/runtime/static/ops.h>
#include <ATen/CPUFunctions.h>
#include <ATen/InferSize.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Parallel.h>
#include <ATen/ScalarOps.h>
#include <ATen/TensorUtils.h>
#include <ATen/cpu/vec/functional.h>
#include <ATen/cpu/vec/vec.h>
#include <ATen/native/EmbeddingBag.h>
#include <ATen/native/Fill.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/cpu/SerialStackImpl.h>
#include <ATen/native/layer_norm.h>
#include <ATen/native/quantized/cpu/fbgemm_utils.h>
#include <ATen/native/quantized/cpu/qembeddingbag.h>
#include <ATen/native/quantized/cpu/qembeddingbag_prepack.h>
#include <ATen/quantized/QTensorImpl.h>
#include <ATen/quantized/Quantizer.h>
#include <c10/core/ScalarType.h>
#include <c10/core/WrapDimMinimal.h>
#include <c10/util/irange.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/runtime/static/impl.h>
#include <torch/csrc/jit/runtime/static/te_wrapper.h>
#include <torch/csrc/jit/runtime/vararg_functions.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_simplifier.h>
#include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
#include <torch/csrc/jit/tensorexpr/loopnest.h>
namespace torch {{
namespace jit {{
{code}
}} // namespace jit
}} // namespace torch
"""
with open(file_path, "w") as f:
f.write(generated)
clang_format(file_path)
def write_test_cpp(cpp_ops: Sequence[str], file_path: str) -> None:
code = "\n".join(cpp_ops)
generated = f"""// @lint-ignore-every CLANGTIDY HOWTOEVEN
#include <gtest/gtest.h>
#include <torch/csrc/jit/runtime/static/impl.h>
#include <torch/torch.h>
#include "test_utils.h"
using namespace caffe2;
using namespace torch;
using namespace torch::jit;
using namespace torch::jit::test;
using c10::IValue;
{code}
"""
with open(file_path, "w") as f:
f.write(generated)
clang_format(file_path)
def main() -> None:
parser = argparse.ArgumentParser(description="Generate ATen source files")
parser.add_argument(
"-s",
"--source-path",
help="path to source directory for ATen",
default="caffe2/aten/src/ATen",
)
parser.add_argument(
"-p",
"--generated-ops-cpp-path",
help="path to directory to generate op dispatcher .cpp file",
default="caffe2/torch/csrc/jit/runtime/static/generated_ops.cpp",
)
parser.add_argument(
"-t",
"--generated-ops-test-cpp-path",
help="path to directory to generate op dispatcher .cpp file",
default="caffe2/benchmarks/static_runtime/test_generated_ops.cc",
)
options = parser.parse_args()
native_yaml_path = os.path.join(options.source_path, "native/native_functions.yaml")
tags_yaml_path = os.path.join(options.source_path, "native/tags.yaml")
parsed_yaml = gen.parse_native_yaml(native_yaml_path, tags_yaml_path)
native_functions, backend_indices = (
parsed_yaml.native_functions,
parsed_yaml.backend_indices,
)
op_generator = generator.GenOpDispatcher()
test_case_generator = generator.GenOpTestCase()
native_functions_groups = [
g
for g in gen.get_grouped_native_functions(native_functions)
if isinstance(g, NativeFunctionsGroup)
]
supported_functions_groups = group_functions_by_op_name(native_functions_groups)
out_variant_op_result = [
op_generator.out_variant(groups, backend_indices[DispatchKey.CPU])
for groups in supported_functions_groups
]
out_variant_test_result = [
test_case_generator.out_variant(groups) for groups in supported_functions_groups
]
native_functions_view_groups = [
g
for g in gen.get_grouped_by_view_native_functions(native_functions)
if isinstance(g, NativeFunctionsViewGroup)
]
supported_functions_view_groups = group_functions_by_op_name(
native_functions_view_groups
)
view_op_result = [
op_generator.view(groups, backend_indices[DispatchKey.CPU])
for groups in supported_functions_view_groups
]
view_test_result = [
test_case_generator.view(groups) for groups in supported_functions_view_groups
]
op_result = out_variant_op_result + ["\n\n"] + view_op_result
test_result = out_variant_test_result + ["\n\n"] + view_test_result
write_cpp(op_result, options.generated_ops_cpp_path)
write_test_cpp(test_result, options.generated_ops_test_cpp_path)
print(
"\ntotal grouped native ops: %d"
% len(gen.get_grouped_native_functions(native_functions))
)
print("grouped native ops with out variant: %d" % len(native_functions_groups))
supported_functions_num = sum(
[len(groups) for groups in supported_functions_groups]
)
print("generated functions groups with out variant: %d" % supported_functions_num)
print("\nview grouped native ops: %d" % len(native_functions_view_groups))
supported_view_functions_num = sum(
[len(groups) for groups in supported_functions_view_groups]
)
print("generated functions view groups: %d" % supported_view_functions_num)
print(
"\noverall generated : %d"
% (supported_functions_num + supported_view_functions_num)
)
if __name__ == "__main__":
set_simple_logging(escape_newlines=False)
main()
| pytorch-master | torchgen/static_runtime/gen_static_runtime_ops.py |
| pytorch-master | torchgen/operator_versions/__init__.py |
MOBILE_UPGRADERS_HEADER_DESCRIPTION = """/**
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytorch && python torchgen/operator_versions/gen_mobile_upgraders.py
*/
"""
| pytorch-master | torchgen/operator_versions/gen_mobile_upgraders_constant.py |
#!/usr/bin/env python3
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List
import torch
from torch.jit.generate_bytecode import generate_upgraders_bytecode
from torchgen.code_template import CodeTemplate
from torchgen.operator_versions.gen_mobile_upgraders_constant import (
MOBILE_UPGRADERS_HEADER_DESCRIPTION,
)
class ByteCode(Enum):
instructions = 1
constants = 2
types = 3
operators = 4
register_size = 5
EXCLUDED_OP_SET = [
"aten::full.names",
"aten::full.out",
"aten::full",
]
EXCLUE_UPGRADER_SET = ["full_0_4", "full_out_0_4"]
ONE_INSTRUCTION = CodeTemplate(
"""
Instruction{OpCode::${operator_name}, ${X}, ${N}},"""
)
INSTRUCTION_LIST = CodeTemplate(
"""std::vector<Instruction>({
${instruction_list}
}), // instructions list"""
)
ONE_CONSTANT = CodeTemplate(
"""
c10::IValue(${constant}),"""
)
CONSTANT_LIST = CodeTemplate(
"""std::vector<c10::IValue>({
${constant_list}
}), // constants list"""
)
CONSTANTS_LIST_EMPTY = """std::vector<c10::IValue>(), // constants list"""
ONE_TYPE = CodeTemplate("""c10::parseType("${type_str}"),""")
TYPE_LIST = CodeTemplate(
"""std::vector<c10::TypePtr>({
${type_list}
}), // types list"""
)
TYPE_LIST_EMPTY = """std::vector<c10::TypePtr>(), // types list"""
ONE_OPERATOTR_STRING = CodeTemplate(
"""
OperatorString({"${operator_name}", "${overload_name}", ${num_of_args}}),"""
)
OPERATOR_STRING_LIST = CodeTemplate(
"""
std::vector<OperatorString>({
${operator_string_list}
}), // operators list"""
)
ONE_UPGRADER_FUNCTION = CodeTemplate(
"""
mobile::Function::registerFunc(
"${upgrader_name}",
${instruction_list},
${constant_list},
${type_list},
${register_size}
)"""
)
ONE_UPGRADER_SRC = CodeTemplate(
"""
ByteCodeFunctionWithOperator({
${bytecode_function},
${operator_string_list}
}),"""
)
ONE_UPGRADER_IN_VERSION_MAP = CodeTemplate(
"""Upgrader({${upgrader_min_version}, ${upgrader_max_version}, "${upgrader_name}", ${bytecode_func_index}})"""
) # noqa: E501
ONE_OPERATOR_IN_VERSION_MAP = CodeTemplate(
"""
{std::string("${operator_name}"),
std::vector<Upgrader>({
${upgrader_list_in_version_map}
})},"""
)
OPERATOR_VERSION_MAP = CodeTemplate(
"""
const std::unordered_map<std::string, std::vector<Upgrader>>
getOperatorVersionMapForMobile() {
static std::unordered_map<std::string, std::vector<Upgrader>>
operatorVersionMapForMobile({
${operator_list_in_version_map}
});
return operatorVersionMapForMobile;
}
"""
)
UPGRADER_CPP_SRC = CodeTemplate(
MOBILE_UPGRADERS_HEADER_DESCRIPTION
+ """
#include <caffe2/serialize/versions.h>
#include <torch/csrc/jit/mobile/upgrader_mobile.h>
namespace c10 {
TypePtr parseType(const std::string& pythonStr);
} // namespace c10
namespace torch {
namespace jit {
// clang-format off
// From operator_versions_map
${operator_version_map}
const std::vector<ByteCodeFunctionWithOperator>& getUpgraderBytecodeList() {
auto generate_upgrader_bytecode_list = []() {
std::vector<ByteCodeFunctionWithOperator> upgrader_function_list({
${upgrader_bytecode}
});
for (const auto& upgrader_function : upgrader_function_list) {
for (const auto& op : upgrader_function.operators) {
upgrader_function.function.append_operator(
op.name,
op.overload_name,
op.num_specified_args);
}
}
return upgrader_function_list;
};
static std::vector<ByteCodeFunctionWithOperator> upgraderBytecodeList =
generate_upgrader_bytecode_list();
return upgraderBytecodeList;
}
// clang-format on
} // namespace jit
} // namespace torch
"""
)
UPGRADER_MOBILE_FILE_NAME = "upgrader_mobile.cpp"
UPGRADER_ELEMENT = CodeTemplate(
"""\
Upgrader({${min_version}, ${max_version}, ${operator_name}, ${index}}),
"""
)
PER_OPERATOR_UPGRADER_LIST = CodeTemplate(
"""\
{
std::string(${operator_name}),
std::vector<Upgrader>({${upgrader_list}});
}
"""
)
def construct_instruction(instruction_list_from_yaml: List[Any]) -> str:
instruction_list_part = []
for instruction in instruction_list_from_yaml:
instruction_list_part.append(
ONE_INSTRUCTION.substitute(
operator_name=instruction[0],
X=instruction[1],
N=instruction[2],
)
)
return INSTRUCTION_LIST.substitute(
instruction_list="".join(instruction_list_part).lstrip("\n")
)
def construct_constants(constants_list_from_yaml: List[Any]) -> str:
constants_list_part = []
for constant_from_yaml in constants_list_from_yaml:
convert_constant = None
if isinstance(constant_from_yaml, str):
# Add quotes if it's string
convert_constant = f'"{constant_from_yaml}"'
elif isinstance(constant_from_yaml, bool):
convert_constant = "true" if constant_from_yaml else "false"
elif constant_from_yaml is None:
convert_constant = ""
elif isinstance(constant_from_yaml, int):
convert_constant = str(constant_from_yaml)
else:
raise ValueError(
f"The type of {constant_from_yaml} is {type(constant_from_yaml)}. "
"Please add change in construct_constants function in gen_mobile_upgraders.py."
)
constants_list_part.append(ONE_CONSTANT.substitute(constant=convert_constant))
if len(constants_list_part) == 0:
return CONSTANTS_LIST_EMPTY
return CONSTANT_LIST.substitute(
constant_list="".join(constants_list_part).lstrip("\n")
)
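# Illustrative usage, not part of the upstream file: constants taken from a
# hypothetical upgrader bytecode entry and (roughly, ignoring exact whitespace)
# the C++ initializer list emitted for them. The helper is never called.
def _example_construct_constants() -> str:
    # Returns roughly:
    #   std::vector<c10::IValue>({
    #       c10::IValue("trunc"),
    #       c10::IValue(true),
    #       c10::IValue(),
    #       c10::IValue(4),
    #   }), // constants list
    return construct_constants(["trunc", True, None, 4])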
def construct_operators(operator_list_from_yaml: List[Any]) -> str:
operator_list_part = []
for operator in operator_list_from_yaml:
operator_list_part.append(
ONE_OPERATOTR_STRING.substitute(
operator_name=operator[0],
overload_name=operator[1],
num_of_args=operator[2],
)
)
return OPERATOR_STRING_LIST.substitute(
operator_string_list="".join(operator_list_part).lstrip("\n")
)
def construct_types(types_tr_list_from_yaml: List[Any]) -> str:
types_tr_list_part = []
for types_tr in types_tr_list_from_yaml:
types_tr_list_part.append(ONE_TYPE.substitute(type_str=types_tr))
if len(types_tr_list_part) == 0:
return TYPE_LIST_EMPTY
return TYPE_LIST.substitute(type_list="".join(types_tr_list_part).lstrip("\n"))
def construct_register_size(register_size_from_yaml: int) -> str:
if not isinstance(register_size_from_yaml, int):
raise ValueError(
f"Input register size is {register_size_from_yaml} and"
"it's type is {type(register_size_from_yaml)}. An int type is expected."
)
return str(register_size_from_yaml)
def construct_version_maps(
upgrader_bytecode_function_to_index_map: Dict[str, Any]
) -> str:
version_map = torch._C._get_operator_version_map()
sorted_version_map_ = sorted(version_map.items(), key=lambda item: item[0]) # type: ignore[no-any-return]
sorted_version_map = {name: lst for name, lst in sorted_version_map_}
operator_list_in_version_map_part = []
for op_name in sorted_version_map:
upgraders_in_version_map_part = []
# TODO: remove the skip after these two operators schemas are fixed
if op_name in EXCLUDED_OP_SET:
continue
upgrader_ranges = torch._C._get_upgrader_ranges(op_name)
upgrader_entries = sorted_version_map[op_name]
assert len(upgrader_ranges) == len(upgrader_entries)
for idx, upgrader_entry in enumerate(upgrader_entries):
upgrader_name = upgrader_entry.upgrader_name
bytecode_function_index = upgrader_bytecode_function_to_index_map[
upgrader_name
]
upgraders_in_version_map_part.append(
ONE_UPGRADER_IN_VERSION_MAP.substitute(
upgrader_min_version=upgrader_ranges[idx].min_version,
upgrader_max_version=upgrader_ranges[idx].max_version,
upgrader_name=upgrader_name,
bytecode_func_index=bytecode_function_index,
)
)
operator_list_in_version_map_part.append(
ONE_OPERATOR_IN_VERSION_MAP.substitute(
operator_name=op_name,
upgrader_list_in_version_map="".join(upgraders_in_version_map_part),
)
)
return OPERATOR_VERSION_MAP.substitute(
operator_list_in_version_map="".join(operator_list_in_version_map_part).lstrip(
"\n"
)
)
def get_upgrader_bytecode_function_to_index_map(
upgrader_dict: List[Dict[str, Any]]
) -> Dict[str, Any]:
upgrader_bytecode_function_to_index_map = {}
index = 0
for upgrader_bytecode in upgrader_dict:
for upgrader_name, bytecode in upgrader_bytecode.items():
if upgrader_name in EXCLUE_UPGRADER_SET:
continue
upgrader_bytecode_function_to_index_map[upgrader_name] = index
index += 1
return upgrader_bytecode_function_to_index_map
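# Illustrative usage, not part of the upstream file: the index map just records
# the order in which non-excluded upgrader entries appear. The upgrader names
# below are hypothetical and the helper is never called.
def _example_upgrader_index_map() -> Dict[str, Any]:
    entries = [{"div_Tensor_0_3": {}}, {"div_Scalar_0_3": {}}]
    # -> {"div_Tensor_0_3": 0, "div_Scalar_0_3": 1}
    return get_upgrader_bytecode_function_to_index_map(entries)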
def write_cpp(cpp_path: str, upgrader_dict: List[Dict[str, Any]]) -> None:
body_parts = []
upgrader_bytecode_function_to_index_map = (
get_upgrader_bytecode_function_to_index_map(upgrader_dict)
)
version_map_src = construct_version_maps(upgrader_bytecode_function_to_index_map)
all_upgrader_src_string = []
for upgrader_bytecode in upgrader_dict:
for upgrader_name, bytecode in upgrader_bytecode.items():
# TODO: remove the skip after these two operators schemas are fixed
if upgrader_name in EXCLUE_UPGRADER_SET:
continue
instruction_list_str = ""
constant_list_str = ""
type_list_str = ""
register_size_str = ""
operator_list_str = ""
for table_name, contents in bytecode.items():
element = ByteCode[table_name]
body_string = ""
if element is ByteCode.instructions:
instruction_list_str = construct_instruction(contents)
elif element is ByteCode.constants:
constant_list_str = construct_constants(contents)
elif element is ByteCode.operators:
operator_list_str = construct_operators(contents)
elif element is ByteCode.types:
type_list_str = construct_types(contents)
elif element is ByteCode.register_size:
register_size_str = construct_register_size(contents)
one_upgrader_function_string = ONE_UPGRADER_FUNCTION.substitute(
upgrader_name=upgrader_name,
instruction_list=instruction_list_str,
constant_list=constant_list_str,
type_list=type_list_str,
register_size=register_size_str,
)
one_upgrader_src_string = ONE_UPGRADER_SRC.substitute(
bytecode_function=one_upgrader_function_string.lstrip("\n"),
operator_string_list=operator_list_str.lstrip("\n"),
)
all_upgrader_src_string.append(one_upgrader_src_string)
upgrader_file_content = UPGRADER_CPP_SRC.substitute(
operator_version_map=version_map_src,
upgrader_bytecode="".join(all_upgrader_src_string).lstrip("\n"),
)
body_parts.append(upgrader_file_content)
print("writing file to : ", cpp_path + "/" + UPGRADER_MOBILE_FILE_NAME)
with open(os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME), "wb") as out_file:
        final_output = "".join(body_parts)
        out_file.write(final_output.encode("utf-8"))
def sort_upgrader(upgrader_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
sorted_upgrader_list = sorted(
upgrader_list, key=lambda one_upgrader: next(iter(one_upgrader))
)
return sorted_upgrader_list
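# Illustrative usage, not part of the upstream file: upgraders are ordered by
# the single key of each dict; the names below are hypothetical and the helper
# is never called.
def _example_sort_upgrader() -> List[Dict[str, Any]]:
    # -> [{"div_Scalar_0_3": {}}, {"div_Tensor_0_3": {}}]
    return sort_upgrader([{"div_Tensor_0_3": {}}, {"div_Scalar_0_3": {}}])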
def main() -> None:
upgrader_list = generate_upgraders_bytecode()
sorted_upgrader_list = sort_upgrader(upgrader_list)
for up in sorted_upgrader_list:
print("after sort upgrader : ", next(iter(up)))
pytorch_dir = Path(__file__).resolve().parents[3]
upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "mobile"
write_cpp(str(upgrader_path), sorted_upgrader_list)
if __name__ == "__main__":
main()
| pytorch-master | torchgen/operator_versions/gen_mobile_upgraders.py |
#!/usr/bin/env python3
import os
from itertools import chain
from pathlib import Path
from torch.jit._shape_functions import (
bounded_compute_graph_mapping,
shape_compute_graph_mapping,
)
SHAPE_HEADER = r"""
/**
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytorch && python
* torchgen/shape_functions/gen_jit_shape_functions.py
*/
#include <torch/csrc/jit/jit_log.h>
#include <torch/csrc/jit/passes/inliner.h>
#include <torch/csrc/jit/runtime/operator.h>
#include <torch/csrc/jit/runtime/serialized_shape_function_registry.h>
// clang-format off
namespace torch {
namespace jit {
std::string shape_funcs = ""
"""
DECOMP_CENTER = r"""
const std::string& GetSerializedShapeFunctions() {
return shape_funcs;
}
"""
DECOMP_END = r"""
// clang-format on
} // namespace jit
} // namespace torch
"""
SERIALIZED_SHAPE_UTIL_FILE_NAME = "serialized_shape_function_registry.cpp"
def gen_serialized_decompisitions() -> str:
already_serialized_names = set()
unique_funcs = []
all_funcs = chain(
shape_compute_graph_mapping.values(), *bounded_compute_graph_mapping.values()
)
for scripted_func in all_funcs:
if scripted_func.name in already_serialized_names:
continue
already_serialized_names.add(scripted_func.name)
unique_funcs.append(scripted_func)
output_strs = []
curr_str = ""
for scripted_func in unique_funcs:
serialized_code = scripted_func.code
        # Technically the limit is higher, but leave a buffer because there are
        # weird rules around some characters.
        # TODO: this was the limit I found by googling, but it seems far too short?
MAX_MSFT_STR_LEN = 2000
if len(curr_str) + len(serialized_code) <= MAX_MSFT_STR_LEN:
curr_str += "\n" + serialized_code
else:
output_strs.append(curr_str)
curr_str = scripted_func.code
output_strs.append(curr_str)
final_output = ""
    # The Windows compiler doesn't correctly handle adjacent
    # string literals
for output_str in output_strs:
start = '+ std::string(R"=====('
end = '\n)=====")\n'
final_output += start + output_str + end
final_output += ";"
return final_output
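# Illustrative sketch, not part of the upstream file: the chunking strategy used
# above, in isolation, on plain strings. Serialized code is packed into pieces no
# longer than max_len characters so that each piece can be emitted as its own
# std::string(R"=====(...)=====") literal. The helper is hypothetical and never called.
def _example_chunk_serialized_code(pieces, max_len=2000):
    chunks = []
    current = ""
    for piece in pieces:
        if len(current) + len(piece) <= max_len:
            current += "\n" + piece
        else:
            chunks.append(current)
            current = piece
    chunks.append(current)
    return chunks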
SHAPE_SCHEMA_START = r"""
const OperatorMap<std::string>& GetShapeFunctionMappings() {
static const OperatorMap<std::string> shape_mappings {
"""
SHAPE_SCHEMA_END = r"""
};
return shape_mappings;
}
"""
def gen_shape_mappings() -> str:
shape_mappings = []
for schema, scripted_func in shape_compute_graph_mapping.items():
shape_mappings.append(' {"' + schema + '", "' + scripted_func.name + '"},')
return SHAPE_SCHEMA_START + "\n".join(shape_mappings) + SHAPE_SCHEMA_END
BOUNDED_SCHEMA_START = r"""
const OperatorMap<std::pair<std::string, std::string>>& GetBoundedShapeMappings() {
static const OperatorMap<std::pair<std::string, std::string>> shape_mappings {
"""
def gen_bounded_mappings() -> str:
bounded_mappings = []
for schema, (lower_func, upper_func) in bounded_compute_graph_mapping.items():
map_str = (
' {"'
+ schema
+ '", {"'
+ lower_func.name
+ '", "'
+ upper_func.name
+ '"}},'
)
bounded_mappings.append(map_str)
return BOUNDED_SCHEMA_START + "\n".join(bounded_mappings) + SHAPE_SCHEMA_END
def write_decomposition_util_file(path: str) -> None:
decomposition_str = gen_serialized_decompisitions()
shape_mappings = gen_shape_mappings()
bounded_mappings = gen_bounded_mappings()
file_components = [
SHAPE_HEADER,
decomposition_str,
DECOMP_CENTER,
shape_mappings,
bounded_mappings,
DECOMP_END,
]
print("writing file to : ", path + "/" + SERIALIZED_SHAPE_UTIL_FILE_NAME)
with open(os.path.join(path, SERIALIZED_SHAPE_UTIL_FILE_NAME), "wb") as out_file:
final_output = "".join(file_components)
out_file.write(final_output.encode("utf-8"))
def main() -> None:
pytorch_dir = Path(__file__).resolve().parents[2]
upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "runtime"
write_decomposition_util_file(str(upgrader_path))
if __name__ == "__main__":
main()
| pytorch-master | torchgen/shape_functions/gen_jit_shape_functions.py |
#!/usr/bin/env python3
import os
from pathlib import Path
from torch.jit._decompositions import decomposition_table
# from torchgen.code_template import CodeTemplate
DECOMP_HEADER = r"""
/**
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytorch && python torchgen/decompositions/gen_jit_decompositions.py
*/
#include <torch/csrc/jit/jit_log.h>
#include <torch/csrc/jit/passes/inliner.h>
#include <torch/csrc/jit/runtime/operator.h>
#include <torch/csrc/jit/runtime/decomposition_registry_util.h>
namespace torch {
namespace jit {
const std::string decomp_funcs =
R"("""
DECOMP_CENTER = r"""
)";
const std::string& GetSerializedDecompositions() {
return decomp_funcs;
}
const OperatorMap<std::string>& GetDecompositionMapping() {
// clang-format off
static const OperatorMap<std::string> decomposition_mapping {
"""
DECOMP_END = r"""
};
// clang-format on
return decomposition_mapping;
}
} // namespace jit
} // namespace torch
"""
DECOMPOSITION_UTIL_FILE_NAME = "decomposition_registry_util.cpp"
def gen_serialized_decompisitions() -> str:
return "\n".join(
[scripted_func.code for scripted_func in decomposition_table.values()]
)
def gen_decomposition_mappings() -> str:
decomposition_mappings = []
for schema, scripted_func in decomposition_table.items():
decomposition_mappings.append(
' {"' + schema + '", "' + scripted_func.name + '"},'
)
return "\n".join(decomposition_mappings)
def write_decomposition_util_file(path: str) -> None:
decomposition_str = gen_serialized_decompisitions()
decomposition_mappings = gen_decomposition_mappings()
file_components = [
DECOMP_HEADER,
decomposition_str,
DECOMP_CENTER,
decomposition_mappings,
DECOMP_END,
]
print("writing file to : ", path + "/" + DECOMPOSITION_UTIL_FILE_NAME)
with open(os.path.join(path, DECOMPOSITION_UTIL_FILE_NAME), "wb") as out_file:
final_output = "".join(file_components)
out_file.write(final_output.encode("utf-8"))
def main() -> None:
pytorch_dir = Path(__file__).resolve().parents[3]
upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "runtime"
write_decomposition_util_file(str(upgrader_path))
if __name__ == "__main__":
main()
| pytorch-master | torchgen/decompositions/gen_jit_decompositions.py |
from torchgen.api.lazy import LazyIrSchema
from torchgen.api.types import OptionalCType
def ts_lowering_body(schema: LazyIrSchema) -> str:
# for now, we just want one IR class decl and soon after also the method defs
# and we use the functional version not out/inplace.
emplace_arguments = []
for arg in schema.positional_args:
if arg.is_lazy_value:
if isinstance(arg.lazy_type, OptionalCType):
emplace_arguments.append(
f"has_{arg.name} ? loctx->GetOutputOp(operand(i++)) : nullptr"
)
continue
emplace_arguments.append("loctx->GetOutputOp(operand(i++))")
continue
emplace_arguments.append(f'"{arg.name}", {arg.name}')
emplace_arguments_str = "\n ".join(
[f"arguments.emplace_back({a});" for a in emplace_arguments]
)
emplace_kwarg_values = [
f'"{arg.name}", loctx->GetOutputOp(operand(i++))'
for arg in schema.keyword_values
]
emplace_kwarg_scalars = [
f'"{arg.name}", {arg.name}' for arg in schema.keyword_scalars
]
emplace_kwarguments = "\n ".join(
[
f"kwarguments.emplace_back({a});"
for a in emplace_kwarg_values + emplace_kwarg_scalars
]
)
return f"""\
std::vector<torch::jit::NamedValue> arguments;
std::vector<torch::jit::NamedValue> kwarguments;
arguments.reserve({len(emplace_arguments)});
kwarguments.reserve({len(emplace_kwarg_values + emplace_kwarg_scalars)});
size_t i = 0;
{emplace_arguments_str}
{emplace_kwarguments}
torch::lazy::TSOpVector {schema.aten_name}_out = torch::lazy::LowerTSBuiltin(function, op().op, arguments, kwarguments);
TORCH_CHECK_EQ({schema.aten_name}_out.size(), {len(schema.returns)});
return {schema.aten_name}_out;
"""
| pytorch-master | torchgen/dest/lazy_ts_lowering.py |
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Tuple, Union
import torchgen.api.ufunc as ufunc
from torchgen.api.translate import translate
from torchgen.api.types import (
BaseCType,
Binding,
CType,
Expr,
NamedCType,
opmath_t,
scalar_t,
StructuredImplSignature,
VectorizedCType,
)
from torchgen.api.ufunc import UfunctorBindings
from torchgen.context import with_native_function
from torchgen.model import (
Argument,
BaseTy,
BaseType,
DispatchKey,
NativeFunctionsGroup,
ScalarType,
UfuncKey,
)
from torchgen.utils import OrderedSet
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# CUDA STUFF
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# NB: not bothering to generate dispatch stub forward declaration in header,
# we can just paste it wherever necessary
# TODO: use BackendIndex
# dispatch_key: DispatchKey # only CPU/CUDA right now
# Represents functors for implementing CUDA ufuncs.
# Functors are templated by scalar_t because when USERS instantiate functors
# they are templated. A functor looks something like this:
#
# template <typename scalar_t>
# struct CUDAFunctorOnSelf_add {
# using opmath_t = at::opmath_type<scalar_t>;
# opmath_t other_;
# opmath_t alpha_;
# CUDAFunctorOnSelf_add(opmath_t other, opmath_t alpha)
# : other_(other), alpha_(alpha) {}
# __device__ scalar_t operator()(scalar_t self) {
# return ufunc::add(static_cast<opmath_t>(self), other_, alpha_);
# }
# };
#
@dataclass(frozen=True)
class UfunctorSignature:
g: NativeFunctionsGroup
scalar_tensor_idx: Optional[int]
name: str
def arguments(self) -> UfunctorBindings:
return ufunc.ufunctor_arguments(
self.g, scalar_tensor_idx=self.scalar_tensor_idx, scalar_t=scalar_t
)
def fields(self) -> List[Binding]:
# fields are renamed to have a trailing underscore, as is conventional
return [b.rename(f"{b.name}_") for b in self.arguments().ctor]
def returns_type(self) -> CType:
# TODO: don't hardcode; return type will be inferred based on tags on
# the native function
return BaseCType(scalar_t)
def decl_fields(self) -> str:
return "\n".join(f"{f.type} {f.name};" for f in self.fields())
def inline_defn_ctor(self) -> str:
args_str = ", ".join(a.decl() for a in self.arguments().ctor)
# NB: hypothetically could do this with translate but the
# transition here is very regular
init_str = ", ".join(f"{a.name}_({a.name})" for a in self.arguments().ctor)
return f"{self.name}({args_str}) : {init_str} {{}}"
def decl_apply(self) -> str:
args_str = ", ".join(a.decl() for a in self.arguments().apply)
return f"{self.returns_type().cpp_type()} operator()({args_str}) const"
@dataclass(frozen=True)
class UfuncSignature:
g: NativeFunctionsGroup
name: str
compute_t: CType
def arguments(self) -> List[Binding]:
return ufunc.ufunc_arguments(self.g, compute_t=self.compute_t)
def call(self, ctx: Sequence[Union[Binding, Expr]]) -> str:
return f"{self.name}({', '.join(a.expr for a in translate(ctx, self.arguments()))})"
# steps:
# 1. take the functional signature
# 2. use api.ufunc to convert it to template signature. this establishes
# the type of the template function
# 3. use api.ufunc (II) to generate a split struct / operator() signature.
#    this establishes the context in which we call the template signature
#
# StructuredImplSignature context
# ~> functor constructor sig
#
# Functor constructor context
# ~> functor fields sig
#
# Functor apply context (functor fields + functor apply sig)
# ~> template sig
#
def eligible_for_binary_scalar_specialization(g: NativeFunctionsGroup) -> bool:
num_tensors = sum(
1 for a in g.functional.func.arguments.flat_non_out if a.type.is_tensor_like()
)
return num_tensors == 2
def compute_ufunc_cuda_functors(
g: NativeFunctionsGroup,
) -> Tuple[Dict[ScalarType, Dict[UfuncKey, UfunctorSignature]], str]:
# First, build the functors.
ufunctor_sigs: Dict[ScalarType, Dict[UfuncKey, UfunctorSignature]] = {}
ufunctors: List[str] = []
loops = g.out.ufunc_inner_loop
scalar_tensor_idx_lookup = {
UfuncKey.CUDAFunctorOnSelf: 1,
UfuncKey.CUDAFunctorOnOther: 0,
UfuncKey.CUDAFunctor: None,
}
if eligible_for_binary_scalar_specialization(g):
keys = [
UfuncKey.CUDAFunctorOnSelf,
UfuncKey.CUDAFunctorOnOther,
UfuncKey.CUDAFunctor,
]
else:
keys = [UfuncKey.CUDAFunctor]
for k in [UfuncKey.CUDAFunctorOnSelf, UfuncKey.CUDAFunctorOnOther]:
assert k not in loops, f"cannot use {k} on non-binary function"
for k in keys:
# If the key was directly defined, skip functor codegen; we assume the
# user already done it for us
if k in loops:
ufunctor_sig = UfunctorSignature(
g, scalar_tensor_idx=scalar_tensor_idx_lookup[k], name=loops[k].name
)
for dtype in loops[k].supported_dtypes:
ufunctor_sigs.setdefault(dtype, {})[k] = ufunctor_sig
continue
# Note [ScalarOnly and Generic must match names for CUDA]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Otherwise, look in ANY of the generic entries. For simplicity of
# codegen, if both ScalarOnly and Generic are defined, the ufunc name
# must match (if they didn't match, we'd have to generate distinct
# functors per dtype, which is awful, so we're not going to do it unless
# someone really forces us to)
ufunc_name = None
supported_dtypes: OrderedSet[ScalarType] = OrderedSet()
for lk in [UfuncKey.ScalarOnly, UfuncKey.Generic]:
if lk not in loops:
continue
if ufunc_name is None:
ufunc_name = loops[lk].name
else:
# See Note [ScalarOnly and Generic must match names for CUDA]
assert (
ufunc_name == loops[lk].name
), "ScalarOnly and Generic must have same ufunc name"
supported_dtypes |= loops[lk].supported_dtypes
assert ufunc_name is not None
name = f"{k}_{ufunc_name}"
ufunctor_sig = UfunctorSignature(
g, scalar_tensor_idx=scalar_tensor_idx_lookup[k], name=name
)
for dtype in supported_dtypes:
ufunctor_sigs.setdefault(dtype, {})[k] = ufunctor_sig
ufunc_sig = UfuncSignature(
g, name=f"ufunc::{ufunc_name}", compute_t=BaseCType(opmath_t)
)
apply_ctx = ufunctor_sig.fields() + ufunctor_sig.arguments().apply
ufunctors.append(
f"""
template <typename scalar_t>
struct {ufunctor_sig.name} {{
using opmath_t = at::opmath_type<scalar_t>;
{ufunctor_sig.decl_fields()}
{ufunctor_sig.inline_defn_ctor()}
__device__ {ufunctor_sig.decl_apply()} {{
return {ufunc_sig.call(apply_ctx)};
}}
}};
"""
)
return ufunctor_sigs, "\n".join(ufunctors)
@dataclass(frozen=True)
class BinaryScalarSpecializationConfig:
scalar_idx: int
ctor_tensor: str
ufunc_key: UfuncKey
BinaryScalarSpecializationConfigs = [
BinaryScalarSpecializationConfig(
scalar_idx=0,
ctor_tensor="self",
ufunc_key=UfuncKey.CUDAFunctorOnOther,
),
BinaryScalarSpecializationConfig(
scalar_idx=1,
ctor_tensor="other",
ufunc_key=UfuncKey.CUDAFunctorOnSelf,
),
]
def compute_ufunc_cuda_dtype_body(
g: NativeFunctionsGroup,
dtype: ScalarType,
inner_loops: Dict[UfuncKey, UfunctorSignature],
parent_ctx: Sequence[Binding],
) -> str:
body = "using opmath_t = at::opmath_type<scalar_t>;"
body += "if (false) {}\n" # for ease of codegen
for config in BinaryScalarSpecializationConfigs:
if config.ufunc_key not in inner_loops:
continue
ufunctor_sig = inner_loops[config.ufunc_key]
scalar_idx = config.scalar_idx + 1
# Make a copy and at the same time widen the type (not permissible
# without copy; we don't want to mutate the input argument anyway)
ctx: List[Union[Expr, Binding]] = list(parent_ctx)
ctx.append(
Expr(
expr=f"iter.scalar_value<opmath_t>({scalar_idx})",
type=NamedCType(config.ctor_tensor, BaseCType(opmath_t)),
)
)
ufunctor_ctor_exprs_str = ", ".join(
a.expr for a in translate(ctx, ufunctor_sig.arguments().ctor)
)
# NB: ufunctor must be allocated before iter.remove_operand is called,
# as it relies on iter
body += f"""\
else if (iter.is_cpu_scalar({scalar_idx})) {{
{ufunctor_sig.name}<scalar_t> ufunctor({ufunctor_ctor_exprs_str});
iter.remove_operand({scalar_idx});
gpu_kernel(iter, ufunctor);
}}"""
ufunctor_sig = inner_loops[UfuncKey.CUDAFunctor]
ufunctor_ctor_exprs_str = ", ".join(
a.expr for a in translate(parent_ctx, ufunctor_sig.arguments().ctor)
)
body += f"""
else {{
gpu_kernel(iter, {ufunctor_sig.name}<scalar_t>({ufunctor_ctor_exprs_str}));
}}
"""
return body
@with_native_function
def compute_ufunc_cuda(g: NativeFunctionsGroup) -> str:
# First, build the functors, indexing them by dtype
ufunctor_sigs, ufunctors = compute_ufunc_cuda_functors(g)
# Next, build the conditionals
sig = StructuredImplSignature(g, ufunc.kernel_name(g, DispatchKey.CUDA))
dtype_cases = []
for dtype, inner_ufunc_sigs in ufunctor_sigs.items():
dtype_cases.append(
f"""
AT_DISPATCH_CASE(at::ScalarType::{dtype},
[&]() {{
{compute_ufunc_cuda_dtype_body(g, dtype, inner_ufunc_sigs, sig.arguments())}
}}
)
"""
)
dtype_cases_str = "\n".join(dtype_cases)
stub_sig = StubSignature(g)
return f"""
{ufunctors}
{stub_sig.type_defn()};
{stub_sig.dispatch_decl()};
{stub_sig.kernel_defn()} {{
AT_DISPATCH_SWITCH(iter.common_dtype(), "{sig.name}",
{dtype_cases_str}
);
}}
REGISTER_DISPATCH({stub_sig.name}, &{stub_sig.kernel_name});
{sig.defn()} {{
{stub_sig.direct_call(sig.arguments())};
}}
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# CPU STUFF
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
@dataclass(frozen=True)
class StubSignature:
g: NativeFunctionsGroup
@property
def name(self) -> str:
return f"{str(self.g.functional.func.name.name)}_stub"
@property
def kernel_name(self) -> str:
return f"{str(self.g.functional.func.name.name)}_kernel"
@property
def type_name(self) -> str:
return f"{str(self.g.functional.func.name.name)}_fn"
def arguments(self) -> List[Binding]:
return ufunc.stub_arguments(self.g)
def type(self) -> str:
cpp_args = self.arguments()
return f"void(*)(TensorIteratorBase&, {', '.join(a.type for a in cpp_args)})"
def dispatch_decl(self) -> str:
return f"DECLARE_DISPATCH({self.type_name}, {self.name})"
def dispatch_defn(self) -> str:
return f"DEFINE_DISPATCH({self.name})"
def kernel_defn(self) -> str:
return f"void {self.kernel_name}(TensorIteratorBase& iter, {', '.join(a.defn() for a in self.arguments())})"
def type_defn(self) -> str:
return f"using {self.type_name} = {self.type()}"
# must be called from context where this is TensorIteratorBase*
def call(self, ctx: Sequence[Binding]) -> str:
return f"{self.name}(device_type(), *this, {', '.join(a.expr for a in translate(ctx, self.arguments()))})"
# used in CUDA to skip the unnecessary dynamic dispatch
def direct_call(self, ctx: Sequence[Binding]) -> str:
return f"{self.kernel_name}(*this, {', '.join(a.expr for a in translate(ctx, self.arguments()))})"
@with_native_function
def compute_ufunc_cpu(g: NativeFunctionsGroup) -> str:
stub_sig = StubSignature(g)
sig = StructuredImplSignature(g, ufunc.kernel_name(g, DispatchKey.CPU))
return f"""
{stub_sig.type_defn()};
{stub_sig.dispatch_decl()};
{stub_sig.dispatch_defn()};
{sig.defn()} {{
{stub_sig.call(sig.arguments())};
}}
"""
def compute_ufunc_cpu_dtype_body(
g: NativeFunctionsGroup,
dtype: ScalarType,
inner_loops: Dict[UfuncKey, UfuncSignature],
parent_ctx: Sequence[Binding],
) -> str:
assert UfuncKey.CPUScalar in inner_loops, f"{dtype}, {inner_loops.keys()}"
assert inner_loops.keys() <= {UfuncKey.CPUScalar, UfuncKey.CPUVector}
scalar_loop = inner_loops[UfuncKey.CPUScalar]
vec_loop = None
if UfuncKey.CPUVector in inner_loops:
vec_loop = inner_loops[UfuncKey.CPUVector]
# NB: We DON'T use translate here, because translate is
# incapable of CSE'ing the scalar accesses in case it is also
# used by Vectorized; also, the unpacking here is very simple
# and only affects Scalar; everything else is implicitly captured
# by the lambda
# Setup scalar in scope
body = []
ctx = []
for b in parent_ctx:
if isinstance(b.argument, Argument) and b.argument.type != BaseType(
BaseTy.Scalar
):
continue
body.append(f"auto _s_{b.name} = {b.name}.to<scalar_t>();")
ctx.append(Expr(f"_s_{b.name}", NamedCType(b.nctype.name, BaseCType(scalar_t))))
if vec_loop is not None:
for b in parent_ctx:
if isinstance(b.argument, Argument) and b.argument.type != BaseType(
BaseTy.Scalar
):
continue
body.append(
f"auto _v_{b.name} = at::vec::Vectorized<scalar_t>(_s_{b.name});"
)
ctx.append(
Expr(
f"_v_{b.name}",
NamedCType(b.nctype.name, VectorizedCType(BaseCType(scalar_t))),
)
)
# Setup lambda signature
# NB: simplified version of ufunctor_arguments
scalar_bindings = []
vec_bindings = []
for a in g.functional.func.arguments.flat_non_out:
if not a.type.is_tensor_like():
continue
assert a.type == BaseType(BaseTy.Tensor)
scalar_bindings.append(
Binding(
name=a.name,
nctype=NamedCType(a.name, BaseCType(scalar_t)),
argument=a,
)
)
if vec_loop is not None:
vec_bindings.append(
Binding(
name=a.name,
nctype=NamedCType(a.name, VectorizedCType(BaseCType(scalar_t))),
argument=a,
)
)
def with_ctx(b: Sequence[Binding]) -> List[Union[Expr, Binding]]:
r: List[Union[Expr, Binding]] = []
r.extend(ctx)
r.extend(b)
return r
body_str = "\n".join(body)
if vec_loop is not None:
return f"""
{body_str}
cpu_kernel_vec(iter,
[=]({', '.join(b.decl() for b in scalar_bindings)}) {{ return {scalar_loop.call(with_ctx(scalar_bindings))}; }},
[=]({', '.join(b.decl() for b in vec_bindings)}) {{ return {vec_loop.call(with_ctx(vec_bindings))}; }}
);
"""
else:
return f"""
{body_str}
cpu_kernel(iter,
[=]({', '.join(b.decl() for b in scalar_bindings)}) {{ return {scalar_loop.call(with_ctx(scalar_bindings))}; }}
);
"""
@with_native_function
def compute_ufunc_cpu_kernel(g: NativeFunctionsGroup) -> str:
stub_sig = StubSignature(g)
# Reindex the ufunc by dtypes; processing generic/scalaronly as well
loops = g.out.ufunc_inner_loop
ufunc_sigs: Dict[ScalarType, Dict[UfuncKey, UfuncSignature]] = {}
for k in [UfuncKey.CPUScalar, UfuncKey.CPUVector]:
lks = []
# ORDER MATTERS: this specifies overriding precedence
if k in loops: # should happen rarely
lks.append(k)
if UfuncKey.ScalarOnly in loops and k is UfuncKey.CPUScalar:
lks.append(UfuncKey.ScalarOnly)
if UfuncKey.Generic in loops:
lks.append(UfuncKey.Generic)
# TODO: don't hardcode ufunc:: namespace here, should be centralized smh
for lk in lks:
for dtype in loops[lk].supported_dtypes:
compute_t: CType
if k is UfuncKey.CPUScalar:
compute_t = BaseCType(scalar_t)
elif k is UfuncKey.CPUVector:
compute_t = VectorizedCType(BaseCType(scalar_t))
else:
raise AssertionError()
inner_ufunc_sigs = ufunc_sigs.setdefault(dtype, {})
if k not in inner_ufunc_sigs:
inner_ufunc_sigs[k] = UfuncSignature(
g, name=f"ufunc::{loops[lk].name}", compute_t=compute_t
)
# Build the conditionals
dtype_cases = []
for dtype, inner_ufunc_sigs in ufunc_sigs.items():
dtype_cases.append(
f"""
AT_DISPATCH_CASE(at::ScalarType::{dtype},
[&]() {{
{compute_ufunc_cpu_dtype_body(g, dtype, inner_ufunc_sigs, stub_sig.arguments())}
}}
)
"""
)
dtype_cases_str = "\n".join(dtype_cases)
return f"""
namespace {{
{stub_sig.kernel_defn()} {{
AT_DISPATCH_SWITCH(iter.common_dtype(), "{stub_sig.name}",
{dtype_cases_str}
);
}}
}} // anonymous namespace
{stub_sig.type_defn()};
{stub_sig.dispatch_decl()};
REGISTER_DISPATCH({stub_sig.name}, &{stub_sig.kernel_name});
"""
| pytorch-master | torchgen/dest/ufunc.py |
from .lazy_ir import (
generate_non_native_lazy_ir_nodes as generate_non_native_lazy_ir_nodes,
GenLazyIR as GenLazyIR,
GenLazyNativeFuncDefinition as GenLazyNativeFuncDefinition,
GenLazyShapeInferenceDefinition as GenLazyShapeInferenceDefinition,
)
from .native_functions import (
compute_native_function_declaration as compute_native_function_declaration,
)
from .register_dispatch_key import (
gen_registration_headers as gen_registration_headers,
gen_registration_helpers as gen_registration_helpers,
RegisterDispatchKey as RegisterDispatchKey,
)
from .ufunc import (
compute_ufunc_cpu as compute_ufunc_cpu,
compute_ufunc_cpu_kernel as compute_ufunc_cpu_kernel,
compute_ufunc_cuda as compute_ufunc_cuda,
)
| pytorch-master | torchgen/dest/__init__.py |
from typing import List, Optional, Union
import torchgen.api.meta as meta
import torchgen.api.structured as structured
from torchgen.api.types import kernel_signature
from torchgen.context import with_native_function_and_index
from torchgen.model import BackendIndex, NativeFunction, NativeFunctionsGroup
from torchgen.utils import mapMaybe
@with_native_function_and_index
def gen_unstructured(f: NativeFunction, backend_index: BackendIndex) -> Optional[str]:
sig = kernel_signature(f, backend_index)
metadata = backend_index.get_kernel(f)
if metadata is None:
return None
if "legacy::" in metadata.kernel:
return None
else:
prefix = "static" if backend_index.external else "TORCH_API"
return f"{prefix} {sig.decl(name=metadata.kernel)};"
@with_native_function_and_index
def gen_structured(g: NativeFunctionsGroup, backend_index: BackendIndex) -> List[str]:
meta_name = meta.name(g)
out_args = structured.impl_arguments(g)
metadata = backend_index.get_kernel(g)
if metadata is None:
return []
prefix = "" if backend_index.external else "TORCH_API "
return [
f"""\
struct {prefix}structured_{metadata.kernel} : public at::meta::structured_{meta_name} {{
void impl({', '.join(a.decl() for a in out_args)});
}};
"""
]
# Generates NativeFunctions.h, a list of forward declarations of all
# actual kernel definitions we keep in aten/src/ATen/native/
@with_native_function_and_index
def compute_native_function_declaration(
g: Union[NativeFunctionsGroup, NativeFunction], backend_index: BackendIndex
) -> List[str]:
metadata = backend_index.get_kernel(g)
if isinstance(g, NativeFunctionsGroup):
if metadata is not None and metadata.structured:
if backend_index.external:
# Structured hasn't been tested with external backends yet.
raise AssertionError(
"Structured external backend functions are not implemented yet."
)
else:
return gen_structured(g, backend_index)
else:
return list(
mapMaybe(lambda f: gen_unstructured(f, backend_index), g.functions())
)
else:
x = gen_unstructured(g, backend_index)
return [] if x is None else [x]
| pytorch-master | torchgen/dest/native_functions.py |
import itertools
from abc import ABC
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import torchgen.api.dispatcher as dispatcher
from torchgen.api.lazy import (
getValueT,
isValueType,
LazyArgument,
LazyIrProperties,
LazyIrSchema,
tensorListValueT,
)
from torchgen.api.translate import translate
from torchgen.api.types import (
BaseCType,
Binding,
deviceT,
DispatcherSignature,
kernel_signature,
OptionalCType,
VectorCType,
)
from torchgen.context import method_with_native_function
from torchgen.dest.lazy_ts_lowering import ts_lowering_body
from torchgen.model import (
Argument,
BackendIndex,
FunctionSchema,
NativeFunction,
NativeFunctionsGroup,
)
def node_ctor_arg_rvalue_string(arg: LazyArgument) -> str:
"""
    Given a LazyArgument, generate a C++ string that materializes an rvalue of that
    argument for passing into a lazy Node constructor.
"""
if isValueType(arg.lazy_type):
if isinstance(arg.lazy_type, BaseCType):
if arg.is_wrapped_scalar:
return f"node_{arg.name}"
elif arg.lazy_type.type is tensorListValueT:
return f"lazy_{arg.name}_tensorlist"
elif arg.is_symint_or_list:
cpp_type = arg.lazy_type.cpp_type()
return f"{cpp_type}(dynamic_cast<torch::lazy::SymIntNodeImpl*>({arg.name}.toSymIntNodeImpl().get())->node_, 0)"
return f"lazy_{arg.name}->GetIrValue()"
elif isinstance(arg.lazy_type, OptionalCType):
if arg.is_wrapped_scalar:
return f"node_{arg.name}"
return (
f"lazy_{arg.name} ? "
f"c10::make_optional(lazy_{arg.name}->GetIrValue()) : "
"c10::nullopt"
)
else:
raise AssertionError(
f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
)
else:
if isinstance(arg.lazy_type, VectorCType) and isinstance(
arg.lazy_type.elem, BaseCType
):
return f"std::vector<{arg.lazy_type.elem.type}>({arg.name}.begin(), {arg.name}.end())"
elif (
isinstance(arg.lazy_type, OptionalCType)
and isinstance(arg.lazy_type.elem, VectorCType)
and isinstance(arg.lazy_type.elem.elem, BaseCType)
):
return f"torch::lazy::ToOptionalVector<{arg.lazy_type.elem.elem.type}>({arg.name})"
else:
return f"{arg.name}"
def node_ctor_inputs(schema: LazyIrSchema) -> str:
"""
Produce a formatted string with the arguments as passed into the constructor of a node class.
"""
node_ctor_values = [
node_ctor_arg_rvalue_string(arg) for arg in schema.filtered_args()
]
return ", ".join(node_ctor_values)
def gen_fallback_code(schema: LazyIrSchema, overload_name: str) -> str:
"""
Generate code that falls back to eager conditioned on a predicate
"""
fallback_args = ",\n ".join(
[str(arg.name) for arg in schema.filtered_args(generator=True)]
)
if len(overload_name):
aten_op_str = f"ATEN_OP2({schema.aten_name}, {overload_name})"
else:
aten_op_str = f"ATEN_OP({schema.aten_name})"
or_has_generator = ""
if schema.generator_arg:
# generators are always optional and there is never more than one, at least currently
or_has_generator = f" || ({schema.generator_arg.name}.has_value() && {schema.generator_arg.name}->defined())"
return f"""
if (force_eager_fallback({aten_symbol(schema)}){or_has_generator}) {{
            return at::native::call_fallback_fn<&ltc_eager_fallback, {aten_op_str}>::call(
{fallback_args}
);
}}
"""
def aten_symbol(schema: LazyIrSchema) -> str:
missing_interned_strings = {
"sigmoid_backward",
}
if schema.aten_name in missing_interned_strings:
return f'c10::Symbol::fromQualString("aten::{schema.aten_name}")'
if not schema.aten_name.startswith("at::"):
return f"at::aten::{schema.aten_name}"
else:
return schema.aten_name
# converts all tensor-like arguments to meta tensors. Returns:
# (1) a string containing all of the logic that does the conversions.
# (2) a context, to be used by translate(), with all of the relevant bindings.
def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
context: List[Binding] = []
unwrapped_tensor_args: List[str] = []
for arg in sig.arguments():
if isinstance(arg.argument, Argument) and arg.argument.type.is_tensor_like():
unwrapped_name = f"{arg.name}_meta"
unwrapped_tensor_args.append(
f"auto {unwrapped_name} = to_meta({arg.name});"
)
context.append(arg.with_name(unwrapped_name))
else:
context.append(arg)
unwrap_tensor_args_str = "\n ".join(unwrapped_tensor_args)
return unwrap_tensor_args_str, context
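# For a dispatcher signature like (const at::Tensor & self, int64_t dim), the helper
# above would emit a conversion string along the lines of (illustrative only):
#
#   auto self_meta = to_meta(self);
#
# and return a context in which "self" is rebound to "self_meta", while non-tensor
# arguments such as "dim" pass through unchanged.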
@dataclass(frozen=True)
class GenLazyIR(ABC):
backend_index: BackendIndex
backend_name: str
node_base: str
@method_with_native_function
def __call__(self, f: Union[NativeFunctionsGroup, NativeFunction]) -> List[str]:
func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
schema = LazyIrSchema(func)
return self.gen(schema)
# there is no lowering functionality generated unless this IR base class is subclassed and
# implemented as a backend-specific node
def lowering_function(self, schema: LazyIrSchema) -> str:
return ""
def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
return ""
def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
return f"""bool CanBeReused({node_ctor_args}) const {{
return false;
}}"""
def node_base_ctor_call(self, schema: LazyIrSchema) -> str:
value_args = schema.filtered_args(values=True, scalars=False)
# backends can customize the way the node base class constructor is called,
# as long as all of its arguments can be generated from information available from the schema
base_ctor_value_args_list = []
for arg in value_args:
if isinstance(arg.lazy_type, BaseCType) or isinstance(
arg.lazy_type, VectorCType
):
base_ctor_value_args_list.append(f"{arg.name}")
elif isinstance(arg.lazy_type, OptionalCType):
base_ctor_value_args_list.append(f"{arg.name}.value_or(kNullValue)")
else:
raise AssertionError(
f"Unsupported type ({arg.lazy_type}) - add support if necessary"
)
base_ctor_value_args = ", ".join(base_ctor_value_args_list)
scalar_args = schema.filtered_args(values=False, scalars=True)
        # Shape construction.
# Conditionally build shape depending on specified shape property
if schema.properties.ShapePrecompute:
shape_ctor_arg = "std::move(shapes),"
elif schema.properties.ShapeCompute:
shape_args = [a.name for a in value_args]
shape_args.extend(a.name for a in scalar_args)
shape_ctor_arg = f"compute_shape_{schema.name}({', '.join(shape_args)}),"
elif schema.properties.ShapeCache:
shape_args = [f"operand({i})" for i in range(len(value_args))]
shape_args.extend(a.name for a in scalar_args)
shape_ctor_arg = f"[&](){{ return compute_shape_{schema.name}({', '.join(shape_args)})[0]; }},"
else:
shape_ctor_arg = ""
scalar_hashes = ", ".join(f"{a.name}" for a in scalar_args)
return f"""{self.node_base}(
{schema.node_name}::ClassOpKind(),
OpList{{{base_ctor_value_args}}},
{shape_ctor_arg}
/* num_outputs */ {len(schema.returns)},
torch::lazy::MHash({scalar_hashes}))"""
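    # For a node with one Tensor value "self" and one scalar "dim" (and assuming the
    # backend's node_base is torch::lazy::TsNode), the call built above would render
    # roughly as:
    #
    #   torch::lazy::TsNode(
    #       MyOp::ClassOpKind(),
    #       OpList{self},
    #       std::move(shapes),
    #       /* num_outputs */ 1,
    #       torch::lazy::MHash(dim))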
def gen(self, schema: LazyIrSchema) -> List[str]:
opkind = schema.opkind or aten_symbol(schema)
# for now, we just want one IR class decl and soon after also the method defs
# and we use the functional version not out/inplace.
all_args = schema.filtered_args()
value_args = schema.filtered_args(values=True, scalars=False)
scalar_args = schema.filtered_args(values=False, scalars=True)
ctor_args = [f"const {i.lazy_type.cpp_type()}& {i.name}" for i in all_args]
reuse_ctor_args = ", ".join(ctor_args)
if schema.properties.ShapePrecompute:
ctor_args.append("std::vector<torch::lazy::Shape>&& shapes")
node_ctor_args = ", ".join(ctor_args)
scalar_initializers = ",\n ".join(
[
# This code is just special casing the mapping from string_view -> strings
f"{a.name}({a.name}.has_value() ? c10::make_optional(std::string(*{a.name})) : c10::nullopt)"
if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
else f"{a.name}({a.name})"
for a in scalar_args
]
)
if len(scalar_initializers):
scalar_initializers = f",\n {scalar_initializers}"
scalar_decls = "\n ".join(
[
f"std::string {a.name};"
if a.lazy_type.cpp_type() == "c10::string_view"
else f"c10::optional<std::string> {a.name};"
if a.lazy_type.cpp_type() == "c10::optional<c10::string_view>"
else f"{a.lazy_type.cpp_type()} {a.name};"
for a in scalar_args
]
)
optional_values = [
arg.name
for arg in schema.filtered_args(values=True, scalars=False)
if isinstance(arg.lazy_type, OptionalCType)
]
has_optional_decls = "\n ".join(
[f"bool has_{value}: 1;" for value in optional_values]
)
has_optional_defs = "\n ".join(
[f"has_{value} = !!{value};" for value in optional_values]
)
members_to_string = []
for arg in scalar_args:
if isinstance(arg.lazy_type, OptionalCType):
members_to_string.append(
f"""if ({arg.name}.has_value()) {{
ss << ", {arg.name}=" << {arg.name}.value();
}} else {{
ss << ", {arg.name}=null";
}}"""
)
else:
members_to_string.append(f'ss << ", {arg.name}=" << {arg.name};')
members_to_string_str = "\n ".join(members_to_string)
return [
f"""\
class {schema.node_name} : public {self.node_base} {{
public:
static torch::lazy::OpKind ClassOpKind() {{
return torch::lazy::OpKind({opkind});
}}
{schema.node_name}({node_ctor_args})
: {self.node_base_ctor_call(schema)}{scalar_initializers}
{{
{has_optional_defs}
}}
std::string ToString() const override {{
std::stringstream ss;
ss << {self.node_base}::ToString();
{members_to_string_str}
return ss.str();
}}
{self.create_function(schema, reuse_ctor_args)}
{self.can_be_reused_function(schema, reuse_ctor_args)}
{self.lowering_function(schema)}
{scalar_decls}
{has_optional_decls}
}};
""",
]
@dataclass(frozen=True)
class GenTSLazyIR(GenLazyIR):
def lowering_function(self, schema: LazyIrSchema) -> str:
signature = """
torch::lazy::TSOpVector Lower(
std::shared_ptr<torch::jit::GraphFunction> function,
torch::lazy::TSLoweringContext* loctx) const override"""
if schema.properties.LowerDeclOnly:
return f"{signature};"
elif schema.properties.Lower:
return f"""{signature} {{
{ts_lowering_body(schema)}
}}
"""
else:
return ""
def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
signature = f"static NodePtr Create({node_ctor_args})"
if schema.properties.CreateFnDeclOnly:
return f"{signature};"
elif not schema.properties.CreateFn:
return ""
return f"""{signature} {{
return ReuseOrMakeNode<{schema.node_name}>(data);
}}"""
def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
signature = f"bool CanBeReused({node_ctor_args}) const"
if schema.properties.CanBeReusedDeclOnly:
return f"{signature};"
elif not schema.properties.CanBeReused:
return ""
value_comparison = []
for arg in itertools.chain(schema.positional_values, schema.keyword_values):
if isinstance(arg.lazy_type, OptionalCType):
value_comparison.append(
f"nullable_operand(i++) == {arg.name}.value_or(kNullValue)"
)
else:
value_comparison.append(f"operand(i++) == {arg.name}")
for arg in itertools.chain(schema.positional_scalars, schema.keyword_scalars):
if isinstance(arg.lazy_type, OptionalCType):
value_comparison.append(
f"((!this->{arg.name}&&!{arg.name}) || (this->{arg.name}&&{arg.name} && *(this->{arg.name}) == *{arg.name}))"
)
else:
value_comparison.append(f"this->{arg.name} == {arg.name}")
value_comparison_str = " &&\n ".join(value_comparison)
return f"""{signature} {{
size_t i = 0;
return ({value_comparison_str});
}}"""
@dataclass(frozen=True)
class GenLazyNativeFuncDefinition:
class_method_name: str
backend_index: BackendIndex
tensor_class: str
gen_forced_fallback_code: bool
backend_namespace: str
get_tensorlist: str
get_tensor_or_wrap_number: str
try_get_tensor: str
metrics_counter: str
create_tensor: str
create_from_first_tensor: bool
create_aten_from_ltc_tensor: str
tuple_aten_from_ltc_tensors: str
lazy_tensor_ptr: str
get_device_fn: str
def lazy_tensor_decls(self, func: NativeFunction, schema: LazyIrSchema) -> str:
value_args = schema.filtered_args(values=True, scalars=False)
# Generates lazy_{name} variables for LazyTensors wrapping input tensors
lazy_tensor_decls: List[str] = []
for arg in value_args:
if arg.is_wrapped_scalar:
if isinstance(arg.lazy_type, OptionalCType):
lazy_tensor_decls.append(
f"""auto node_{arg.name} = {arg.name} ?
c10::make_optional(torch::lazy::LazyGraphExecutor::Get()->
GetIrValueForScalarFromCodegen(*{arg.name}, *common_device)):
c10::nullopt;"""
)
else:
lazy_tensor_decls.append(
f"""auto node_{arg.name} = torch::lazy::LazyGraphExecutor::Get()->
GetIrValueForScalarFromCodegen({arg.name}, *common_device);"""
)
elif arg.is_symint_or_list:
continue # values are extracted in isValueType
elif isinstance(arg.lazy_type, BaseCType):
if arg.lazy_type.type is tensorListValueT:
lazy_tensor_decls.append(
f"auto lazy_{arg.name}_tensorlist = "
f"{self.backend_namespace}::{self.get_tensorlist}({arg.name});"
)
else:
lazy_tensor_decls.append(
f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
f"{self.backend_namespace}::{self.get_tensor_or_wrap_number}({arg.name}, *common_device);"
)
elif isinstance(arg.lazy_type, OptionalCType):
# TODO(alanwaketan): Maybe we want to apply GetLtcTensorOrCreateForWrappedNumber here, but hold it
# until we encounter a real world example.
lazy_tensor_decls.append(
f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
f"{self.backend_namespace}::{self.try_get_tensor}({arg.name}.value_or(at::Tensor()));"
)
else:
raise AssertionError(
f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
)
return ("\n ").join(lazy_tensor_decls)
def force_eager_fallback(self, func: NativeFunction, schema: LazyIrSchema) -> str:
if self.gen_forced_fallback_code:
return gen_fallback_code(schema, overload_name=func.func.name.overload_name)
return ""
def metrics(self, func: NativeFunction, schema: LazyIrSchema) -> str:
return f"{self.metrics_counter};"
def get_device(self, func: NativeFunction, schema: LazyIrSchema) -> str:
value_args = schema.filtered_args(values=True, scalars=False)
scalar_args = schema.filtered_args(values=False, scalars=True)
value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
optional_device = OptionalCType(BaseCType(deviceT))
optional_devices = [
a.name for a in scalar_args if a.lazy_type == optional_device
]
assert (
len(value_types_names) > 0 or len(optional_devices) > 0
), "Expected at least one Value or Device type"
get_device_str = (
f"{self.get_device_fn}({', '.join(value_types_names + optional_devices)})"
)
return f"""auto common_device = {get_device_str};
TORCH_INTERNAL_ASSERT(common_device);
"""
def shape_inference(self, func: NativeFunction, schema: LazyIrSchema) -> str:
metadata = self.backend_index.get_kernel(func)
assert metadata is not None
all_args = schema.filtered_args()
returns_length = len(schema.returns)
# call the meta kernel if it exists, to compute output shape/dtype for our IR
# Note [Generated LTC Shape Functions]
# LTC uses meta tensors from core to do shape inference when possible, and otherwise
# we generate a shape function declaration that needs to be manually implemented.
# How do we detect which ops are eligible to use meta tensors?
# In general we should be able to use meta tensors not just on structured operators,
# but also on composite operators that are implemented in terms of structured kernels.
# We don't currently have a way of knowing at codegen time which ops are implemented that way.
        # However, all view and view_copy operators are implemented in terms of structured kernels, so we're
        # going to use meta tensors specifically for all of the view_copy ops (instead of manually writing shape rules for all of them).
is_view_copy_op = "view_copy" in func.tags
is_structured = func.structured or func.structured_delegate is not None
if is_structured or is_view_copy_op:
meta_out = """
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};"""
if returns_length > 1:
def this_shape(i: int) -> str:
return f"torch::lazy::Shape(std::get<{i}>(out_meta).scalar_type(), std::get<{i}>(out_meta).sizes().vec())"
shapes_str = ",".join([this_shape(i) for i in range(returns_length)])
meta_out = "std::vector<torch::lazy::Shape> shapes{" + shapes_str + "};"
# Convert tensor args to the meta device and call it.
# (We can't pass in the input tensors directly, because they are "functional wrappers".
# If any of the meta kernels call a tensor op and redispatch, we don't want to hit the functionalize kernels.)
# Even at::meta:: functions might redispatch, e.g. if they call into view ops.
dispatcher_sig = DispatcherSignature.from_schema(func.func)
meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
meta_call_args = [
e.expr
for e in translate(
meta_call_ctx, dispatcher_sig.arguments(), method=False
)
]
if is_view_copy_op:
# view_copy ops always have a CompositeExplicitAutogradNonFunctional kernel
assert func.has_composite_explicit_autograd_non_functional_kernel
dispatch_ns = "compositeexplicitautogradnonfunctional"
else:
dispatch_ns = "meta"
shape_str = f"""\
{meta_conversion_str}
auto out_meta = at::{dispatch_ns}::{schema.aten_name}({', '.join(meta_call_args)});
{meta_out}"""
else:
shape_sig = ComputeShapeSignature(metadata.kernel, func)
shape_str = f"""
auto shapes = {shape_sig.shape_call};"""
shape_str += f"""
TORCH_INTERNAL_ASSERT(shapes.size() == {returns_length});"""
# Calculating which dimensions are symbolic
func_schema_str = "aten::" + str(func.func)
shape_str += f"""
if(torch::lazy::symbolicShapeEnabled()){{
std::vector<torch::jit::IValue> inputs = {{ {', '.join(str(a.name) for a in all_args)} }};
const char* schema_str = "{func_schema_str}";
applySymbolicShapesOnLT(schema_str, inputs, shapes);
}}
"""
return shape_str
def build_ir_node(self, func: NativeFunction, schema: LazyIrSchema) -> str:
node_ctor_input_str = node_ctor_inputs(schema)
return f"""torch::lazy::NodePtr node = torch::lazy::ReuseNode<{schema.node_name}>({node_ctor_input_str});
if (!node) {{
{self.shape_inference(func, schema)}
node = torch::lazy::MakeNode<{schema.node_name}>({node_ctor_input_str}, std::move(shapes));
CacheNode(node);
}}
"""
def create_lazy_tensor(self, first_tensor_name: Optional[str] = None) -> str:
# xla uses an instance method for tensor creation, for the time being
if self.create_from_first_tensor:
# TODO(whc) remove this if XLA switches to using static method for creation
assert (
first_tensor_name is not None
), "Requires first tensor to create lazy tensor"
return f"{first_tensor_name}.{self.create_tensor}"
return f"{self.backend_namespace}::{self.create_tensor}"
def return_aten_tensor(self, func: NativeFunction, schema: LazyIrSchema) -> str:
returns_length = len(schema.returns)
value_args = schema.filtered_args(values=True, scalars=False)
value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
first_tensor_name = value_types_names[0] if len(value_types_names) > 0 else None
bridge_str = f"""auto result = {self.create_aten_from_ltc_tensor}(
{self.create_lazy_tensor(first_tensor_name)}(std::move(node), *common_device));"""
if returns_length > 1:
assert (
len(value_types_names) > 0
), "Code below assumes there is at least one tensor arg"
bridge_str = f"""std::vector<{self.lazy_tensor_ptr}> lazy_tensors;
for (int i = 0; i < {returns_length}; i++) {{
lazy_tensors.push_back({self.create_lazy_tensor(first_tensor_name)}({getValueT()}(node, i), *common_device));
}}
auto result = {self.tuple_aten_from_ltc_tensors}<{returns_length}>(lazy_tensors);"""
if schema.name.name.inplace or func.func.is_out_fn():
assert returns_length == 1, (
"We assumed there was no such case where an op is an in-place variant "
f"and has tuple outputs, but got tuple of len {returns_length}."
)
bridge_str = f"""lazy_{first_tensor_name}->SetInPlaceIrValue(node);
auto& result = {first_tensor_name};"""
bridge_str += """
return result;"""
return bridge_str
@method_with_native_function
def __call__(self, func: NativeFunction) -> List[str]:
sig = kernel_signature(func, self.backend_index)
metadata = self.backend_index.get_kernel(func)
assert metadata is not None
schema = LazyIrSchema(func.func)
return [
f"""\
{sig.decl(name=f"{self.class_method_name}::{metadata.kernel}")} {{
{self.force_eager_fallback(func, schema)}
{self.metrics(func, schema)}
{self.get_device(func, schema)}
{self.lazy_tensor_decls(func, schema)}
{self.build_ir_node(func, schema)}
{self.return_aten_tensor(func, schema)}
}};\n
"""
]
class ComputeShapeSignature:
"""
    Here we use the base name as the suffix of the signature, so that we do not generate separate shape signatures for in-place variants.
"""
def __init__(self, kernel_name: str, f: NativeFunction):
self.__schema = LazyIrSchema(f.func)
self.__dispatch_args = ", ".join(
[a.decl() for a in dispatcher.arguments(f.func)]
)
self.__call_args = ", ".join(
[f"{arg.name}" for arg in self.__schema.filtered_args(generator=True)]
)
self.__kernel_name = kernel_name
def __decl_suffix(self) -> str:
return f"{self.__kernel_name}({self.__dispatch_args})"
def __call_suffix(self) -> str:
return f"{self.__kernel_name}({self.__call_args})"
@property
def shape_decl(self) -> str:
return f"TORCH_API std::vector<torch::lazy::Shape> compute_shape_{self.__decl_suffix()}"
@property
def shape_call(self) -> str:
return f"torch::lazy::compute_shape_{self.__call_suffix()}"
@dataclass(frozen=True)
class GenLazyShapeInferenceDefinition:
backend_index: BackendIndex
tensor_class: str
@method_with_native_function
def __call__(self, f: NativeFunction) -> List[str]:
sig = kernel_signature(f, self.backend_index)
metadata = self.backend_index.get_kernel(f)
assert metadata is not None
# See Note [Generated LTC Shape Functions]
is_view_copy_op = "view_copy" in f.tags
is_structured = f.structured or f.structured_delegate is not None
if is_structured or is_view_copy_op:
return []
else:
shape_sig = ComputeShapeSignature(metadata.kernel, f)
return ["\n".join([f"{shape_sig.shape_decl};"])]
def generate_non_native_lazy_ir_nodes(
non_native: List[Dict[str, Any]], gen_lazy_ir: GenLazyIR
) -> List[str]:
"""Generate the non-native lazy IR node classes"""
nodes = []
for op in non_native:
# Set default properties for Non-Native IRs
properties = LazyIrProperties("ShapeCache", "CanBeReused", "LowerDeclOnly")
for p in op.get("properties", []):
setattr(properties, p, True)
schema = LazyIrSchema(FunctionSchema.parse(op["func"]), properties)
schema.opkind = op.get("opkind")
nodes.append(gen_lazy_ir.gen(schema)[0])
return nodes
| pytorch-master | torchgen/dest/lazy_ir.py |
import itertools
import textwrap
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from typing_extensions import Literal
import torchgen.api.cpp as cpp
import torchgen.api.meta as meta
import torchgen.api.structured as structured
from torchgen.api.translate import translate
from torchgen.api.types import (
BaseCType,
Binding,
ConstRefCType,
CppSignature,
CppSignatureGroup,
DispatcherSignature,
Expr,
kernel_signature,
MutRefCType,
NamedCType,
NativeSignature,
tensorT,
)
from torchgen.context import method_with_native_function, native_function_manager
from torchgen.model import (
Argument,
BackendIndex,
DeviceCheckType,
DispatchKey,
gets_generated_out_inplace_wrapper,
is_cuda_dispatch_key,
NativeFunction,
NativeFunctionsGroup,
SchemaKind,
TensorOptionsArguments,
)
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import assert_never, mapMaybe, Target
def gen_registration_headers(
backend_index: BackendIndex,
per_operator_headers: bool,
rocm: bool,
) -> List[str]:
if per_operator_headers:
headers = ["#include <ATen/ops/as_strided_native.h>"]
else:
headers = ["#include <ATen/NativeFunctions.h>"]
if backend_index.dispatch_key in (DispatchKey.CPU, DispatchKey.Meta):
headers.append("#include <ATen/EmptyTensor.h>")
elif backend_index.dispatch_key == DispatchKey.CUDA:
if rocm:
headers.append("#include <ATen/hip/EmptyTensor.h>")
else:
headers.append("#include <ATen/cuda/EmptyTensor.h>")
elif backend_index.dispatch_key == DispatchKey.MPS:
headers.append("#include <ATen/mps/EmptyTensor.h>")
elif per_operator_headers:
headers += [
"#include <ATen/ops/empty.h>",
"#include <ATen/ops/empty_strided.h>",
"#include <ATen/ops/_copy_from_and_resize.h>",
"#include <ATen/ops/_copy_from.h>",
]
else:
headers.append("#include <ATen/Functions.h>")
return headers
def gen_empty_impl_names(
backend_index: BackendIndex,
) -> Tuple[Optional[str], Optional[str]]:
empty_impl = None
empty_strided_impl = None
if backend_index.dispatch_key in (
DispatchKey.Meta,
DispatchKey.CPU,
DispatchKey.CUDA,
DispatchKey.MPS,
):
dispatch = str(backend_index.dispatch_key).lower()
empty_impl = f"at::detail::empty_{dispatch}"
empty_strided_impl = f"at::detail::empty_strided_{dispatch}"
elif backend_index.dispatch_key in (
DispatchKey.CompositeExplicitAutogradNonFunctional,
DispatchKey.QuantizedCPU,
DispatchKey.QuantizedCUDA,
):
empty_impl = "at::empty"
empty_strided_impl = "at::empty_strided"
return empty_impl, empty_strided_impl
def gen_create_out_helper(backend_index: BackendIndex) -> List[str]:
if backend_index.dispatch_key == DispatchKey.Meta:
empty_options = "options.device(at::kMeta)"
else:
empty_options = "options"
empty_impl, empty_strided_impl = gen_empty_impl_names(backend_index)
if empty_impl is None:
return []
return [
f"""
Tensor create_out(IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {{
if (strides.empty()) {{
return {empty_impl}(sizes, {empty_options});
}} else {{
return {empty_strided_impl}(sizes, strides, {empty_options});
}}
}}
"""
]
def gen_maybe_create_proxy_helper(backend_index: BackendIndex) -> List[str]:
_, empty_strided_impl = gen_empty_impl_names(backend_index)
return (
[]
if empty_strided_impl is None
else [
f"""
c10::optional<Tensor> maybe_create_proxy(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {{
if (out.strides() != strides) {{
return {empty_strided_impl}(sizes, strides, options);
}}
return c10::nullopt;
}}
"""
]
)
def gen_resize_out_helper(backend_index: BackendIndex) -> List[str]:
if backend_index.dispatch_key == DispatchKey.CompositeExplicitAutogradNonFunctional:
# The function isn't used by this key (since only functional ops have a kernel for this key),
# so we need to not include it to avoid a defined-but-not-used error.
return []
return [
"""
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
TORCH_CHECK(options.dtype() == out.dtype(),
"Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
TORCH_CHECK(options.device() == out.device(),
"Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
const bool resized = at::native::resize_output(out, sizes);
// Only restride if a resize occurred; otherwise we ignore the (advisory)
// strides from the meta function and directly use the output tensor's
// preexisting strides
if (resized) {
if (!strides.empty()) {
TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
at::native::as_strided_(out, sizes, strides);
} else if (options.memory_format_opt().has_value()) {
out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
}
}
}
"""
]
def gen_check_inplace_helper(backend_index: BackendIndex) -> List[str]:
return [
"""
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
// These checks are needed on those operators that:
// 1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
// 2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
// For other operators (e.g. 'add'), 'TensorIterator' already checks
// these things separately.
TORCH_CHECK(options.dtype() == self.dtype(),
"Bad in-place call: ",
"input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
TORCH_CHECK(options.device() == self.device(),
"Bad in-place call: ",
"input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
TORCH_CHECK(sizes == self.sizes(),
"Bad in-place call: ",
"input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
"""
]
def gen_registration_helpers(backend_index: BackendIndex) -> List[str]:
return [
*gen_create_out_helper(backend_index),
*gen_resize_out_helper(backend_index),
*gen_check_inplace_helper(backend_index),
*gen_maybe_create_proxy_helper(backend_index),
]
# Generates Register{dispatch}.cpp (e.g., RegisterCPU.cpp).
#
# - The primary function of this file is to register all of the
# implementations for the given dispatch key to the dispatcher,
# so they are available for use in PyTorch. If dispatch is
# None, we generate schema (def) registrations and catchall
# registrations.
# - The secondary function of this file is to generate a wrapper
# around functions. In CPUType these wrappers do nothing
# (and should be removed), but in other cases they handle
# DeviceGuard. A small extra benefit of wrappers is they
# are not overloaded, so they can be used in the registration
# API without having to disambiguate which overload you want
# (as would be the case if you directly registered native::
# functions).
# - The tertiary function of this file is to generate *static*
# cpp API bindings which can be used to bypass dispatcher
# directly to kernels, but with user-friendly cpp-style API
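# As a hedged sketch (operator and kernel names below are invented), the targets
# roughly correspond to the following generated C++:
#
#   ANONYMOUS_DEFINITION:
#     namespace {
#     at::Tensor wrapper_my_op(const at::Tensor & self) {
#       // device check / device guard
#       return at::native::my_op_cpu(self);
#     }
#     } // anonymous namespace
#
#   NAMESPACED_DEFINITION: at::cpu::my_op(...) simply forwards to the wrapper above.
#
#   REGISTRATION:
#     m.impl("my_op", TORCH_FN(wrapper_my_op));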
@dataclass(frozen=True)
class RegisterDispatchKey:
backend_index: BackendIndex
target: Union[
Literal[Target.ANONYMOUS_DEFINITION],
Literal[Target.NAMESPACED_DEFINITION],
Literal[Target.NAMESPACED_DECLARATION],
Literal[Target.REGISTRATION],
]
# Selector object to determine which operators to generate
# registration code for.
selector: SelectiveBuilder
# Whether or not we are actually code-genning for ROCm
rocm: bool
# The class that all unstructured native functions live under. This is used to improve
# compiler error messages when a kernel writer adds a native function with the wrong signature.
# This is only used in unstructured kernels, since structured kernels already live in a class.
# Finally, this field is currently Optional because it is only used by external backends.
# It would be nice if we can add the same logic to in-tree kernels too, but that requires updating
# all of the existing kernel signatures scattered across aten/src/ATen/native.
class_method_name: Optional[str]
# Only set to true in lightweight dispatch. If lightweight dispatch is enabled we are registering
# operators into JIT op registry, thus we need to avoid generating code to register into the dispatcher.
skip_dispatcher_op_registration: bool
@staticmethod
def gen_device_check(
type: DeviceCheckType, args: List[Argument], method_name: str
) -> str:
if type == DeviceCheckType.NoCheck:
return " // No device check\n"
device_check = "c10::optional<Device> common_device = nullopt;\n"
device_check += "(void)common_device; // Suppress unused variable warning\n"
for arg in args:
# Only tensor like arguments are eligible
if arg.type.is_tensor_like():
device_check += f"""
c10::impl::check_and_update_common_device(common_device, {arg.name}, "{method_name}", "{arg.name}");"""
return device_check
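    # For two tensor arguments "self" and "other" on a wrapper named "wrapper_my_op",
    # gen_device_check would emit something like (illustrative):
    #
    #   c10::optional<Device> common_device = nullopt;
    #   (void)common_device; // Suppress unused variable warning
    #   c10::impl::check_and_update_common_device(common_device, self, "wrapper_my_op", "self");
    #   c10::impl::check_and_update_common_device(common_device, other, "wrapper_my_op", "other");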
@method_with_native_function
def __call__(self, f: Union[NativeFunctionsGroup, NativeFunction]) -> List[str]:
if isinstance(f, NativeFunctionsGroup):
g: NativeFunctionsGroup = f
# Note: We call gen_structured() if the operator is marked structured, regardless of the backend.
# gen_structured() has special logic to handle auto-generated kernels.
if g.structured:
return self.gen_structured(g)
else:
return list(
mapMaybe(lambda f: self.gen_unstructured(f, g), g.functions())
)
elif isinstance(f, NativeFunction):
r = self.gen_unstructured(f)
return [] if r is None else [r]
else:
assert_never(f)
def wrapper_kernel_sig(
self, f: NativeFunction
) -> Union[NativeSignature, DispatcherSignature]:
# The prefix is just to ensure uniqueness. The Dispatcher API doesn't guarantee unique kernel names.
return kernel_signature(
f, self.backend_index, prefix=f"wrapper_{f.func.name.overload_name}_"
)
def gen_out_inplace_wrapper(
self, f: NativeFunction, g: Optional[NativeFunctionsGroup]
) -> Optional[str]:
if g is None:
return None
k = f.func.kind()
if k is SchemaKind.inplace:
copy_op = "at::_copy_from"
elif k is SchemaKind.out:
copy_op = "at::_copy_from_and_resize"
else:
raise AssertionError("gen_out_inplace_wrapper called on a functional op")
sig = self.wrapper_kernel_sig(f)
name = sig.name()
func_res = f"{name}_tmp"
return_names = cpp.return_names(f)
if len(return_names) > 1:
updates = "\n ".join(
f"{copy_op}(std::get<{i}>({func_res}), {ret_name});"
for i, ret_name in enumerate(return_names)
)
returns = f'{sig.returns_type().cpp_type()}({", ".join(return_names)})'
else:
ret_name = return_names[0]
updates = f"{copy_op}({func_res}, {ret_name});"
returns = ret_name
functional_sig = self.wrapper_kernel_sig(g.functional)
wrapper_name = sig.name()
return f"""\
{sig.defn(name=wrapper_name)} {{
auto {func_res} = {functional_sig.name()}({", ".join(e.expr for e in translate(sig.arguments(), functional_sig.arguments()))});
{updates}
return {returns};
}}
"""
def gen_structured(self, g: NativeFunctionsGroup) -> List[str]:
metadata = self.backend_index.get_kernel(g)
if self.backend_index.dispatch_key == DispatchKey.Meta:
assert not self.backend_index.has_kernel(g.out), (
"Do not explicitly specify Meta dispatch key on structured "
"functions, they will be automatically generated for you"
)
elif (
self.backend_index.dispatch_key
== DispatchKey.CompositeExplicitAutogradNonFunctional
):
assert not self.backend_index.has_kernel(g.out), (
"Do not explicitly specify CompositeExplicitAutograd dispatch key on structured "
"functions, they will be automatically generated for you"
)
elif metadata is None or not metadata.structured:
return list(mapMaybe(lambda f: self.gen_unstructured(f, g), g.functions()))
structured_gen = StructuredRegisterDispatchKey(
self.backend_index,
self.target,
self.selector,
self.rocm,
self.class_method_name,
self.skip_dispatcher_op_registration,
g,
)
return list(mapMaybe(structured_gen.gen_one, g.functions()))
def gen_unstructured(
self, f: NativeFunction, g: Optional[NativeFunctionsGroup] = None
) -> Optional[str]:
with native_function_manager(f):
inplace_meta = False
gets_out_inplace_wrapper = False
if not self.backend_index.has_kernel(f):
if (
self.backend_index.dispatch_key == DispatchKey.Meta
and f.func.kind() is SchemaKind.inplace
and
# Defer to composites for meta implementation
not f.has_composite_kernel
and
# Inplace list operations are not supported
len(f.func.returns) == 1
):
inplace_meta = True
elif (
not self.backend_index.use_out_as_primary
and g is not None
and gets_generated_out_inplace_wrapper(f, g, self.backend_index)
):
                    # We want to generate inplace/out wrappers for ops that don't have a kernel for this backend.
gets_out_inplace_wrapper = True
else:
return None
if f.manual_kernel_registration:
return None
if (
self.target is Target.REGISTRATION
and not self.selector.is_native_function_selected(f)
):
return None
sig = self.wrapper_kernel_sig(f)
name = sig.name()
returns_type = sig.returns_type().cpp_type()
args = sig.arguments()
args_str = ", ".join(a.defn() for a in args)
# See Note [Direct dispatch bindings]
cpp_sig_group = CppSignatureGroup.from_native_function(
f, method=False, fallback_binding=False
)
if self.target is Target.NAMESPACED_DECLARATION:
result = f"TORCH_API {cpp_sig_group.signature.decl()};\n"
if cpp_sig_group.faithful_signature is not None:
result += f"TORCH_API {cpp_sig_group.faithful_signature.decl()};\n"
return result
elif self.target is Target.NAMESPACED_DEFINITION:
def generate_defn(cpp_sig: CppSignature) -> str:
return f"""
{cpp_sig.defn()} {{
return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
}}
"""
result = generate_defn(cpp_sig_group.signature)
if cpp_sig_group.faithful_signature is not None:
result += generate_defn(cpp_sig_group.faithful_signature)
return result
elif self.target is Target.ANONYMOUS_DEFINITION:
# short circuit for inplace_meta
if inplace_meta:
assert f.func.arguments.self_arg is not None
self_arg_name = f.func.arguments.self_arg.argument.name
# TODO: handle in place on tensor list
return f"""
{returns_type} {name}({args_str}) {{
TORCH_CHECK_NOT_IMPLEMENTED({self_arg_name}.is_meta(),
"Cannot inplace into non-meta tensor with meta tensor argument");
return {self_arg_name};
}}
"""
# short circuit for generated inplace/out wrappers
if gets_out_inplace_wrapper:
return self.gen_out_inplace_wrapper(f, g)
metadata = self.backend_index.get_kernel(f)
if metadata is None:
return None
if self.class_method_name is None:
impl_name = f"{metadata.cpp_namespace}::{metadata.kernel}"
else:
impl_name = f"{metadata.cpp_namespace}::{self.class_method_name}::{metadata.kernel}"
args_exprs_str = ", ".join(a.name for a in args)
device_check = " // No device check\n"
# Backends that require device guards presumably also require device checks.
if self.backend_index.device_guard:
device_check_args = itertools.chain(
f.func.arguments.out, f.func.arguments.flat_positional
)
device_check = RegisterDispatchKey.gen_device_check(
f.device_check, list(device_check_args), name
)
device_guard = "// DeviceGuard omitted" # default
if f.device_guard and self.backend_index.device_guard:
has_tensor_options = any(
isinstance(a, TensorOptionsArguments)
for a in f.func.arguments.non_out
)
if has_tensor_options:
# kernel is creating a tensor
device_guard = """
const DeviceGuard device_guard(device_or_default(device));"""
# CUDA requires special handling
if is_cuda_dispatch_key(self.backend_index.dispatch_key):
device_guard = (
f"globalContext().lazyInitCUDA();\n{device_guard}"
)
else:
# kernel is operating on existing tensors
                        # There is a precedence order for which argument we use to do the
                        # device guard; the candidate_args chain below encodes that order.
self_arg = (
[f.func.arguments.self_arg.argument]
if f.func.arguments.self_arg is not None
else []
)
candidate_args = itertools.chain(
self_arg,
f.func.arguments.out,
f.func.arguments.flat_positional,
)
# Only tensor like arguments are eligible
device_of = next(
(
f"{a.name}"
for a in candidate_args
if a.type.is_tensor_like()
),
None,
)
if device_of is not None:
device_guard = f"const OptionalDeviceGuard device_guard(device_of({device_of}));"
return f"""\
namespace {{
{returns_type} {name}({args_str}) {{
{device_check}
{device_guard}
return {impl_name}({args_exprs_str});
}}
}} // anonymous namespace
"""
elif self.target is Target.REGISTRATION:
if f.manual_kernel_registration or self.skip_dispatcher_op_registration:
return None
else:
payload = f"TORCH_FN({name})"
return f'm.impl("{f.func.name}",\n{payload});\n'
else:
assert_never(self.target)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# STRUCTURED
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
@dataclass(frozen=True)
class StructuredRegisterDispatchKey(RegisterDispatchKey):
g: NativeFunctionsGroup
def gen_class_set_output_functions(
self, k: SchemaKind, parent_class: str, generate_super: bool
) -> str:
if generate_super:
set_output_super = f"{parent_class}::set_output_raw_strided(output_idx, sizes, strides, options, names);"
else:
set_output_super = ""
def gen_set_output_function(name: str, maybe_create_proxy: bool) -> str:
maybe_star = "*" if k is SchemaKind.functional else ""
return f"""
void set_output_{name}(
int64_t output_idx, IntArrayRef sizes, IntArrayRef strides,
TensorOptions options, DimnameList names
) override {{
{textwrap.indent(self.gen_class_set_output_body(k, maybe_create_proxy), " ")}
if (!names.empty()) {{
namedinference::propagate_names({maybe_star}outputs_[output_idx], names);
}}
// super must happen after, so that downstream can use maybe_get_output
// to retrieve the output
{textwrap.indent(set_output_super, " ")}
}}
"""
return f"""
{gen_set_output_function("strided", maybe_create_proxy=True)}
{gen_set_output_function("raw_strided", maybe_create_proxy=False)}
"""
def gen_class_set_output_body(self, k: SchemaKind, maybe_create_proxy: bool) -> str:
if self.backend_index.dispatch_key in [
DispatchKey.CUDA,
DispatchKey.MPS,
DispatchKey.CompositeExplicitAutogradNonFunctional,
]:
maybe_set_guard = """
auto current_device = guard_.current_device();
if (C10_UNLIKELY(current_device.has_value())) {
TORCH_INTERNAL_ASSERT(*current_device == options.device(),
"structured kernels don't support multi-device outputs");
} else {
guard_.reset_device(options.device());
}
"""
maybe_set_guard_line = maybe_set_guard + "\n"
else:
maybe_set_guard_line = maybe_set_guard = ""
if maybe_create_proxy:
create_proxy = """
auto maybe_proxy = maybe_create_proxy(out, sizes, strides, options);
if (C10_UNLIKELY(maybe_proxy.has_value())) {
proxy_outputs_[output_idx] = c10::ExclusivelyOwned<Tensor>(std::move(maybe_proxy).value());
}
"""
else:
create_proxy = ""
if k is SchemaKind.functional:
assert self.backend_index.dispatch_key in (
DispatchKey.Meta,
DispatchKey.CPU,
DispatchKey.CUDA,
DispatchKey.MPS,
DispatchKey.CompositeExplicitAutogradNonFunctional,
)
return f"""{maybe_set_guard_line}
outputs_[output_idx] = create_out(sizes, strides, options);"""
elif k is SchemaKind.inplace:
return f"""{maybe_set_guard_line}
const auto& out = outputs_[output_idx].get();
check_inplace(out, sizes, options);
{create_proxy}"""
elif k is SchemaKind.out:
return f"""{maybe_set_guard_line}
const auto& out = outputs_[output_idx].get();
resize_out(out, sizes, strides, options);
{create_proxy}"""
elif k is SchemaKind.mutable or k is SchemaKind.scratch:
raise AssertionError(
f"{k} structured operators are currently not supported"
)
else:
assert_never(k)
# returns the definition of a ctor, as well as how to construct
# this class to a variable named op
def gen_class_ctor(self, k: SchemaKind, class_name: str, returns: int) -> str:
if k is SchemaKind.functional:
return ""
elif k is SchemaKind.inplace:
# TODO: Make sure out argument is guaranteed to be self
return f"{class_name}(Tensor& self) : outputs_{{std::ref(self)}} {{}}"
elif k is SchemaKind.out:
out_args = ", ".join(f"Tensor& out{i}" for i in range(returns))
out_refs = ", ".join(f"std::ref(out{i})" for i in range(returns))
return f"{class_name}({out_args}) : outputs_{{ {out_refs} }} {{}}"
elif k is SchemaKind.mutable or k is SchemaKind.scratch:
raise AssertionError(
f"{k} structured operators are currently not supported"
)
else:
assert_never(k)
def gen_class(
self,
f: NativeFunction,
k: SchemaKind,
*,
class_name: str,
parent_class: str,
generate_super: bool,
) -> str:
if k is SchemaKind.functional:
output_type = "c10::ExclusivelyOwned<Tensor>"
output_value = "*outputs_[output_idx]"
proxy_field = ""
elif k is SchemaKind.inplace:
output_type = "std::reference_wrapper<Tensor>"
output_value = "proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get()"
proxy_field = f"std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, {len(f.func.returns)}> proxy_outputs_;"
elif k is SchemaKind.out:
output_type = "std::reference_wrapper<Tensor>"
output_value = "proxy_outputs_[output_idx].has_value() ? **proxy_outputs_[output_idx] : outputs_[output_idx].get()"
proxy_field = f"std::array<c10::optional<c10::ExclusivelyOwned<Tensor>>, {len(f.func.returns)}> proxy_outputs_;"
if self.backend_index.dispatch_key == DispatchKey.CUDA:
if self.rocm:
guard_field = "c10::hip::OptionalHIPGuardMasqueradingAsCUDA guard_;"
else:
guard_field = "c10::cuda::OptionalCUDAGuard guard_;"
elif (
self.backend_index.dispatch_key
== DispatchKey.CompositeExplicitAutogradNonFunctional
):
guard_field = "c10::OptionalDeviceGuard guard_;"
elif self.backend_index.dispatch_key == DispatchKey.MPS:
# TODO: Move to OptionalMPSGuard.
guard_field = "c10::OptionalDeviceGuard guard_;"
else:
guard_field = ""
indent = " " * 4
class_ctor_str = self.gen_class_ctor(k, class_name, len(f.func.returns))
lines = (
f"struct {class_name} final : public {parent_class} {{",
f"{textwrap.indent(class_ctor_str, indent)}",
f"{textwrap.indent(self.gen_class_set_output_functions(k, parent_class, generate_super), indent)}",
" const Tensor& maybe_get_output(int64_t output_idx) override {",
f" return {output_value};\n",
" }",
f" std::array<{output_type}, {len(f.func.returns)}> outputs_;",
f"{textwrap.indent(proxy_field, indent)}",
f"{textwrap.indent(guard_field, indent)}",
"};",
)
return "\n".join(line for line in lines if line)
@method_with_native_function
def gen_one(self, f: NativeFunction) -> Optional[str]:
assert not f.manual_kernel_registration
if (
self.target is Target.REGISTRATION
and not self.selector.is_native_function_selected(f)
):
return None
# TODO: Now, there is something interesting going on here. In the code below,
# we generate CompositeExplicitAutogradNonFunctional implementations of functional and inplace
# based on the out implementation. But in fact, out is definable by
# functional too (just not very efficiently), and this is honestly the
# MORE likely situation for a backend implementor. How do we pick?
# Well, taking a page from Haskell type classes and default methods,
# we could conceivably register a circular definition (out in terms
# of functional, and functional in terms of out) and just require
# someone to implement one or the other. We'd have to do a little bit
# of work to not register one of these "weak" definitions unless there
# is a strong definition somewhere in the DAG! So it's not implemented yet.
if (
self.backend_index.dispatch_key
== DispatchKey.CompositeExplicitAutogradNonFunctional
and f.func.kind() is SchemaKind.out
):
# Never generate a default implementation for out, that's what you
# have to define as a backend implementor
return None
# Note [Direct dispatch bindings]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Signature of the non-dispatched function we'll expose in a header
# (e.g., at::cpu::add). We don't generate methods (TODO: do this
# when CPUTensor class is a thing); nor do we generate fallback
# bindings for manual_cpp_binding functions.
cpp_sig_group = CppSignatureGroup.from_native_function(
f, method=False, fallback_binding=False
)
# Signature of the wrapper function we'll register to the dispatcher
sig = NativeSignature(f.func, prefix="wrapper_")
if self.target is Target.NAMESPACED_DECLARATION:
result = f"TORCH_API {cpp_sig_group.signature.decl()};\n"
if cpp_sig_group.faithful_signature is not None:
result += f"TORCH_API {cpp_sig_group.faithful_signature.decl()};\n"
return result
elif self.target is Target.NAMESPACED_DEFINITION:
def generate_defn(cpp_sig: CppSignature) -> str:
return f"""
{cpp_sig.defn()} {{
return {sig.name()}({', '.join(e.expr for e in translate(cpp_sig.arguments(), sig.arguments()))});
}}
"""
result = generate_defn(cpp_sig_group.signature)
if cpp_sig_group.faithful_signature is not None:
result += generate_defn(cpp_sig_group.faithful_signature)
return result
elif self.target is Target.ANONYMOUS_DEFINITION:
k = f.func.kind()
# Construct the body of the wrapper function with signature sig
sig_body = []
# We'll use context to keep track of any variables we've brought
# into scope while generating code
context: List[Union[Binding, Expr]] = list(sig.arguments())
# Initialize the class corresponding to this structured
# operator; feeding it the output argument(s) if it is known
if self.backend_index.dispatch_key is DispatchKey.Meta:
class_name = f"structured_{meta.name(self.g)}_meta_{k.name}"
parent_class = f"at::meta::structured_{meta.name(self.g)}"
elif (
self.backend_index.dispatch_key
is DispatchKey.CompositeExplicitAutogradNonFunctional
):
# TODO: dedup this branch
class_name = f"structured_{meta.name(self.g)}_default_backend_{k.name}"
parent_class = f"at::meta::structured_{meta.name(self.g)}"
else:
metadata = self.backend_index.get_kernel(self.g)
assert metadata is not None
class_name = f"structured_{metadata.kernel}_{k.name}"
parent_class = f"{metadata.cpp_namespace}::structured_{metadata.kernel}"
if self.backend_index.device_guard:
device_check_args = itertools.chain(
f.func.arguments.out, f.func.arguments.flat_positional
)
sig_body.append(
RegisterDispatchKey.gen_device_check(
f.device_check, list(device_check_args), sig.name()
)
)
if k is SchemaKind.functional:
sig_body.append(f"{class_name} op;")
elif k is SchemaKind.inplace:
sig_body.append(f"{class_name} op(self);")
elif k is SchemaKind.out:
out_args_str = ", ".join(a.name for a in f.func.arguments.out)
sig_body.append(f"{class_name} op({out_args_str});")
# Translate the input native arguments into structured
# arguments for the meta call
meta_exprs = ", ".join(
e.expr
for e in translate(
context, structured.meta_arguments(self.g), method=False
)
)
if self.g.out.precomputed:
# If this function group has precomputed elements, the meta function
# returns a struct containing them which must be saved so that it
# can be unpacked when generating code to call the impl.
sig_body.append(f"auto precompute = op.meta({meta_exprs});")
# Put all of the contents of the precompute struct into the context
# so that translate will be able to return the correct args for the
# call to the impl.
precomputed_values = [
*self.g.out.precomputed.replace.values(),
self.g.out.precomputed.add,
]
for precomputed_elems in precomputed_values:
for arg in precomputed_elems:
context.append(
Expr(
expr=f"precompute.{arg.name}",
type=structured.argument_type(arg, binds=arg.name),
)
)
# Add a use of the precompute struct so FB internal compilers don't
# complain that there is an unused variable.
sig_body.append("(void)precompute;")
else:
sig_body.append(f"op.meta({meta_exprs});")
# After running meta, op.outputs_ is guaranteed to be valid;
# add it to the context
out_args = structured.out_arguments(self.g)
for i, out_arg in enumerate(out_args):
assert ConstRefCType(BaseCType(tensorT)) == out_arg.nctype.type
if k is SchemaKind.out:
expr = f"op.maybe_get_output({i})"
else:
maybe_star = "*" if k is SchemaKind.functional else ""
expr = f"{maybe_star}op.outputs_[{i}]"
context.append(
Expr(
expr=expr,
# TODO: Stop hardcoding that the output type is a Tensor. Note
# that for the codegen here this is fine because outputs_ is
# hardcoded to be tensor already
type=NamedCType(
out_arg.nctype.name, MutRefCType(BaseCType(tensorT))
),
)
)
# With the expanded context, do the impl call (if not a meta
# function)
if (
self.backend_index.dispatch_key
== DispatchKey.CompositeExplicitAutogradNonFunctional
):
# TODO: https://github.com/pytorch/pytorch/issues/53023
out_sig_group = CppSignatureGroup.from_native_function(
self.g.out, method=False, fallback_binding=f.manual_cpp_binding
)
out_sig = out_sig_group.most_faithful_signature()
api_name = out_sig.name()
out_exprs = ", ".join(
e.expr
for e in translate(context, out_sig.arguments(), method=False)
)
# TODO: I think this means structured won't work with method
# only functions (but maybe you're saved by faithful? iunno.)
# NB: Originally I wrote this as an at::redispatch call, but
# I got in trouble because that meant I needed a DispatchKeySet
# in the wrapper function, which meant I needed a DispatchKeySet
# in the DispatchKeyFunctions declarations, but the defined API
# there does NOT permit a dispatch key set. I think you can
# probably unwind this by calling some function to do the TLS
# fetch and get the DispatchKeySet when you don't have it, but
# I didn't do it for this version
sig_body.append(f"at::{api_name}({out_exprs});")
elif self.backend_index.dispatch_key != DispatchKey.Meta:
impl_exprs = ", ".join(
e.expr
for e in translate(
context, structured.impl_arguments(self.g), method=False
)
)
sig_body.append(f"op.impl({impl_exprs});")
# Go over each output, and check if there is a proxy created for it.
# If so, copy it over to the original output.
if k is SchemaKind.out or k is SchemaKind.inplace:
for i in range(len(f.func.returns)):
sig_body.append(
f"if (op.proxy_outputs_[{i}].has_value()) op.outputs_[{i}].get().copy_(**op.proxy_outputs_[{i}]);"
)
# Destructively return the final tensors
# TODO: Do this in translate instead
if k is SchemaKind.functional:
if len(f.func.returns) == 1:
ret_expr = "std::move(op.outputs_[0]).take()" # small optimization
else:
moved = ", ".join(
f"std::move(op.outputs_[{i}]).take()"
for i in range(len(f.func.returns))
)
ret_expr = f"std::make_tuple({moved})"
elif k is SchemaKind.inplace:
ret_expr = "self"
elif k is SchemaKind.out:
if len(f.func.returns) == 1:
ret_expr = f.func.arguments.out[0].name
else:
refs = ", ".join(a.name for a in f.func.arguments.out)
ret_expr = f"std::forward_as_tuple({refs})"
sig_body.append(f"return {ret_expr};")
sig_body_str = "\n".join(sig_body)
# For an overview of what this template code looks like, see
# https://github.com/pytorch/rfcs/pull/9
return f"""\
{self.gen_class(
f, k,
class_name=class_name,
parent_class=parent_class,
generate_super=self.g.out.structured_inherits is not None
)}
{sig.defn()} {{
{sig_body_str}
}}
"""
elif self.target is Target.REGISTRATION:
return f'm.impl("{f.func.name}", TORCH_FN({sig.name()}));'
else:
assert_never(self.target)
# Silence mypy's "Missing return statement" error
return None
| pytorch-master | torchgen/dest/register_dispatch_key.py |
import itertools
from typing import List, Sequence, Union
from torchgen.api import cpp
from torchgen.api.types import ArgName, Binding, CType, NamedCType
from torchgen.model import (
Argument,
FunctionSchema,
Return,
SelfArgument,
TensorOptionsArguments,
Type,
)
from torchgen.utils import assert_never, concatMap
# This file describes the translation of JIT schema to the dispatcher
# API, the *unboxed* calling convention by which invocations through
# the dispatcher are made. Historically, the dispatcher API matched
# the C++ API, but with the establishment of the boxed API, we've
# made changes to the dispatcher API to so that the unboxed API
# better aligns with the boxed API. The dispatcher API hooks heavily
# into our template based boxing/unboxing machinery, so changes
# to this convention will usually need template updates too.
#
# Prominent characteristics of the dispatcher API:
#
# - dtype, layout, device and pin_memory are represented as separate
# arguments.
#
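# Concretely, a JIT schema argument list such as
#
#   (Tensor self, *, ScalarType? dtype=None, Layout? layout=None,
#    Device? device=None, bool? pin_memory=None)
#
# is flattened by jit_arguments() below into individual dtype / layout / device /
# pin_memory arguments rather than a single TensorOptions bundle. (The schema above
# is illustrative, not copied from native_functions.yaml.)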
def name(func: FunctionSchema) -> str:
return cpp.name(func)
def argumenttype_type(
t: Type, *, mutable: bool, binds: ArgName, remove_non_owning_ref_types: bool = False
) -> NamedCType:
    # This is a faux ami. If it makes sense in the future to add
# more special cases here, or invert things so cpp.argument_type
# calls this, or just completely inline the function, please do
# it.
return cpp.argumenttype_type(
t,
mutable=mutable,
binds=binds,
remove_non_owning_ref_types=remove_non_owning_ref_types,
)
def argument_type(
a: Argument, *, binds: ArgName, remove_non_owning_ref_types: bool = False
) -> NamedCType:
return argumenttype_type(
a.type,
mutable=a.is_write,
binds=binds,
remove_non_owning_ref_types=remove_non_owning_ref_types,
)
def returns_type(rs: Sequence[Return]) -> CType:
# At present, there is no difference. But there could be!
return cpp.returns_type(rs)
def jit_arguments(func: FunctionSchema) -> List[Argument]:
def to_argument(
a: Union[Argument, TensorOptionsArguments, SelfArgument]
) -> List[Argument]:
if isinstance(a, Argument):
return [a]
elif isinstance(a, SelfArgument):
return [a.argument]
elif isinstance(a, TensorOptionsArguments):
return [a.dtype, a.layout, a.device, a.pin_memory]
else:
assert_never(a)
return list(
concatMap(
to_argument,
itertools.chain(
func.arguments.positional, func.arguments.kwarg_only, func.arguments.out
),
)
)
def argument(a: Argument, *, remove_non_owning_ref_types: bool = False) -> Binding:
return Binding(
nctype=argument_type(
a, binds=a.name, remove_non_owning_ref_types=remove_non_owning_ref_types
),
name=a.name,
argument=a,
)
def arguments(func: FunctionSchema) -> List[Binding]:
return [argument(a) for a in jit_arguments(func)]
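# Rough doctest-style sketch (the schema is invented for illustration):
#
#   >>> func = FunctionSchema.parse("my_op(Tensor self, int dim) -> Tensor")
#   >>> [b.decl() for b in arguments(func)]
#   ['const at::Tensor & self', 'int64_t dim']
#
# If the schema carried tensor options, they would appear here as four separate
# bindings (dtype, layout, device, pin_memory), per the notes at the top of this file.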
| pytorch-master | torchgen/api/dispatcher.py |
from typing import Dict, List, NoReturn, Sequence, Union
from torchgen.api.types import (
BaseCType,
Binding,
boolT,
ConstRefCType,
deviceT,
Expr,
intArrayRefT,
iOptTensorListRefT,
iTensorListRefT,
layoutT,
ListCType,
longT,
memoryFormatT,
MutRefCType,
NamedCType,
opmath_t,
OptionalCType,
optionalIntArrayRefT,
optionalScalarRefT,
optionalTensorRefT,
scalar_t,
scalarT,
scalarTypeT,
SpecialArgName,
symIntArrayRefT,
SymIntT,
tensorListT,
tensorOptionsT,
tensorT,
VectorCType,
)
# This file implements a small program synthesis engine that implements
# conversions between one API to another.
#
# The key data type in this file is NamedCType, short for Named C++ semantic type. A NamedCType
# represents a C++ type, plus semantic information about what it represents.
# For example, consider the argument "bool pin_memory"; its normal C++ type is
# "bool", but its C++ semantic type also keeps track that this represents a
# "pin_memory"; you can't just use a random other boolean in a context where you
# need a "pin_memory"!
#
# The translator takes a list of needed NamedCTypes, and then figures out how
# to construct expressions with these NamedCTypes from the given bindings. Many
# of these expressions are trivial (I need a Tensor other; there's a Tensor
# other in scope); others are more nontrivial and may require packing/unpacking.
# Some examples of non-trivial action:
#
# - Need the "dtype" binding? Well, maybe "dtype" isn't available
# in the context, instead, "options" is, and you need to extract
# it from there. (Gather)
#
# - Need the "context" binding? Well, maybe "context" isn't available
# in the context, and you need to construct it from "dtype", "device",
# etc. (Scatter)
#
# - Need the "memory_format" binding? Well, actually, it's available
# from both "memory_format" and "options", so you had better make sure
# they are consistent. (Join)
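# A minimal usage sketch (the goal below is illustrative, not lifted from a real
# caller): a wrapper that has "options: const TensorOptions&" in scope but needs
# to call an API taking an optional dtype can ask
#
#   translate(bindings, [NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT)))])
#
# and the solver will return an expression that extracts the dtype out of "options"
# (the Gather case above), rather than requiring the caller to thread a dtype through.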
options_ctype = NamedCType("options", ConstRefCType(BaseCType(tensorOptionsT)))
out_tensor_ctype = NamedCType("out", ConstRefCType(BaseCType(tensorT)))
longVec_ctype = VectorCType(BaseCType(longT))
longSymVec_ctype = VectorCType(BaseCType(SymIntT))
optionalLongVec_ctype = OptionalCType(VectorCType(BaseCType(longT)))
optionalScalar_ctype = OptionalCType(BaseCType(scalarT))
optionalTensor_ctype = OptionalCType(BaseCType(tensorT))
class UnsatError(RuntimeError):
pass
# Given a set of in-scope bindings and a set of target bindings, synthesize
# a list of expressions that uses only the in-scope bindings (bindings) that
# have all of the types of goals. You may want to use this function if
# you're generating code for a function like:
#
# void f({args}) {
# g({exprs}); // g is a different API
# }
#
# and you need to generate "exprs".
#
# Typically, a list of Bindings is convenient to get (you usually call something
# like arguments() to get them); but technically you need less information:
# for 'bindings' an (un-ordered) list of Exprs is sufficient; similarly, for
# 'goals', an (ordered) list of NamedCType goals is sufficient. If you are doing
# something more complicated, e.g., tracking the set of bindings in a context,
# you may find using these smaller types more convenient.
def translate(
bindings: Sequence[Union[Expr, Binding]],
goals: Sequence[Union[NamedCType, Binding]],
*,
method: bool = False,
allow_expensive_conversions: bool = False,
) -> List[Expr]:
binding_exprs: List[Expr] = []
for b in bindings:
if isinstance(b, Binding):
binding_exprs.append(
Expr(
expr=b.name,
type=b.nctype,
)
)
else:
binding_exprs.append(b)
goal_ctypes: List[NamedCType] = []
for g in goals:
if isinstance(g, Binding):
goal_ctypes.append(g.nctype)
else:
goal_ctypes.append(g)
# Add all the bindings to the context
ctx: Dict[NamedCType, str] = {}
for b in binding_exprs:
ctx[b.type] = b.expr
# While we're at it, do some simple forward inference, looking through
# constructors.
#
# NB: When should you do forward inference versus backward inference?
# The general idea:
#
# - Backward inference WHEN the goal gets smaller
# - Forward inference WHEN the hypothesis gets smaller
#
# This helps ensure termination: backward inference starts with a goal
# and tries to make it simpler and simpler until it's trivial; if the
# goal can grow in size, we blow up to a really huge goal size.
# Similarly, with forward inference we take hypotheses and decompose
# them into simpler hypotheses; if hypotheses could expand in size,
# we also have potential nontermination. (In the code below, forward
# inference is only ever carried out at a single step, but you could
# imagine repeated application of forward inference being profitable.)
#
# A good starting point in the literature for exploring more about proof
# search are these lecture notes
# https://www.cs.cmu.edu/~fp/courses/oregon-m10/04-focusing.pdf
#
# TODO: My kingdom for a pattern matcher
# https://www.python.org/dev/peps/pep-0634/
#
# TODO: This could get us in recomputation trouble if b.expr is nontrivial.
# Fix this by implementing some sort of sharing so that if multiple
# goals share the same expression, we only compute it once. This seems
# to matter in practice as compiler is often unwilling to CSE nontrivial
# expressions like scalar.to<scalar_t>()
t = b.type
if (
isinstance(t, ConstRefCType)
and isinstance(t.elem, OptionalCType)
and isinstance(t.elem.elem, BaseCType)
and str(t.elem.elem.type) == "at::Tensor"
):
ctx[
NamedCType(t.elem.elem.name, ConstRefCType(BaseCType(tensorT)))
] = f"({b.expr}.has_value() ? *{b.expr} : at::Tensor())"
if t.type == ConstRefCType(OptionalCType(BaseCType(tensorT))):
ctx[
NamedCType(t.name, BaseCType(optionalTensorRefT))
] = f"(({b.expr}.has_value() && (*{b.expr}).defined()) ? at::OptionalTensorRef(*{b.expr}) : at::OptionalTensorRef())"
if t.type == ConstRefCType(BaseCType(scalarT)):
ctx[NamedCType(t.name, BaseCType(opmath_t))] = f"({b.expr}).to<opmath_t>()"
if t.type == ConstRefCType(OptionalCType(BaseCType(scalarT))):
ctx[
NamedCType(t.name, BaseCType(optionalScalarRefT))
] = f"({b.expr}.has_value() ? at::OptionalScalarRef(&({b.expr}.value())) : at::OptionalScalarRef())"
if t.type == BaseCType(scalar_t):
ctx[
NamedCType(t.name, BaseCType(opmath_t))
] = f"static_cast<opmath_t>({b.expr})"
# [Note: ITensorListRef]
if t.type == BaseCType(tensorListT):
ctx[
NamedCType(t.name, BaseCType(iTensorListRefT))
] = f"at::ITensorListRef({b.expr})"
# [Note: IOptTensorListRef]
if t.type == ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))):
ctx[
NamedCType(t.name, BaseCType(iOptTensorListRefT))
] = f"at::IOptTensorListRef({b.expr})"
# Add implicit bindings if the generated code is inside a Tensor method
if method:
ctx[
NamedCType("self", MutRefCType(BaseCType(tensorT)))
] = "const_cast<Tensor&>(*this)"
ctx[
NamedCType("self", ConstRefCType(BaseCType(tensorT)))
] = "const_cast<Tensor&>(*this)"
# This is better! Byte-for-byte compat
# ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "*this"
def unsat(goal: NamedCType) -> NoReturn:
ctx_desc = "\n".join(
f" {t.cpp_type()} {t.name}; // {e}" for t, e in ctx.items()
)
raise UnsatError(
f"""
Failed to synthesize the expression "{goal.cpp_type()} {goal.name}".
When I failed, the following bindings were available in the context:
{ctx_desc}
This probably means there is a missing rule in the rules of torchgen.api.translate.
Check this module for more information.
"""
)
    # A naive backtracking search implementation. It's naive because it
    # does backtracking via the call stack (bad idea!) and for the most part tries to
# avoid backtracking. In particular, if
# direct=True, we won't try to do any fancy synthesis, just trivial
# conversions (e.g., "T a" is OK for "const T& a"). So all of the
# existing rules in this function simply try to solve immediately,
# and bail if things don't work out.
def solve(goal: NamedCType, *, direct: bool) -> str:
def direct_solve(goal: NamedCType) -> str:
return solve(goal, direct=True)
if goal in ctx:
# Trivial
return ctx[goal]
# const & is satisfied with mutable &
if isinstance(goal.type, ConstRefCType):
try:
# WARNING: not strictly decreasing; be careful not
                # to add a direct conversion that satisfies
# mutable& with const&
return solve(
NamedCType(goal.name, MutRefCType(goal.type.elem)), direct=direct
)
except UnsatError:
pass
# mutable & is satisfied with value
if isinstance(goal.type, MutRefCType):
try:
return solve(NamedCType(goal.name, goal.type.elem), direct=direct)
except UnsatError:
pass
if direct:
unsat(goal)
# For now, all of these rules are mutually exclusive.
if goal == NamedCType("memory_format", OptionalCType(BaseCType(memoryFormatT))):
memory_format = direct_solve(
NamedCType(
SpecialArgName.possibly_redundant_memory_format,
OptionalCType(BaseCType(memoryFormatT)),
)
)
# No need to join "memory_format" and "options" if the target API takes "options" directly.
# Otherwise it will cause the redundant memory_format error.
if options_ctype in goal_ctypes:
return memory_format
try:
options = direct_solve(options_ctype)
return f"c10::impl::check_tensor_options_and_extract_memory_format({options}, {memory_format})"
except UnsatError:
return memory_format
elif goal == NamedCType("options", BaseCType(tensorOptionsT)):
dtype = direct_solve(
NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT)))
)
pin_memory = direct_solve(
NamedCType("pin_memory", OptionalCType(BaseCType(boolT)))
)
device = direct_solve(
NamedCType("device", OptionalCType(BaseCType(deviceT)))
)
layout = direct_solve(
NamedCType("layout", OptionalCType(BaseCType(layoutT)))
)
return f"TensorOptions().dtype({dtype}).layout({layout}).device({device}).pinned_memory({pin_memory})"
elif goal == NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))):
try:
options = direct_solve(options_ctype)
return f"optTypeMetaToScalarType({options}.dtype_opt())"
except UnsatError:
out_tensor = direct_solve(out_tensor_ctype)
return f"{out_tensor}.scalar_type()"
elif goal == NamedCType("layout", OptionalCType(BaseCType(layoutT))):
try:
options = direct_solve(options_ctype)
return f"{options}.layout_opt()"
except UnsatError:
out_tensor = direct_solve(out_tensor_ctype)
return f"{out_tensor}.layout()"
elif goal == NamedCType("device", OptionalCType(BaseCType(deviceT))):
try:
options = direct_solve(options_ctype)
return f"{options}.device_opt()"
except UnsatError:
out_tensor = direct_solve(out_tensor_ctype)
return f"{out_tensor}.device()"
elif goal == NamedCType("pin_memory", OptionalCType(BaseCType(boolT))):
try:
options = direct_solve(options_ctype)
return f"{options}.pinned_memory_opt()"
except UnsatError:
# If we're calling a factory op from its out= variant,
# We don't actually care about the value of pin_memory.
out_tensor = direct_solve(out_tensor_ctype)
return "c10::nullopt"
# We can always do translations from value types to reference types, like vector<int> -> IntArrayRef
elif goal.type == BaseCType(intArrayRefT):
try:
return direct_solve(NamedCType(goal.name, longVec_ctype))
except UnsatError:
# We can also go SymIntArrayRef -> IntArrayRef
symIntArrayRef_type = direct_solve(
NamedCType(goal.name, BaseCType(symIntArrayRefT))
)
return f"c10::asIntArrayRefSlow({symIntArrayRef_type})"
elif goal.type == BaseCType(symIntArrayRefT):
return direct_solve(NamedCType(goal.name, longSymVec_ctype))
elif goal.type == BaseCType(longT):
symInt_type = direct_solve(NamedCType(goal.name, BaseCType(SymIntT)))
return f"{symInt_type}.expectInt()"
elif goal.type == BaseCType(optionalIntArrayRefT):
return direct_solve(NamedCType(goal.name, optionalLongVec_ctype))
elif goal.type == BaseCType(optionalScalarRefT):
return direct_solve(NamedCType(goal.name, optionalScalar_ctype))
elif goal.type == BaseCType(optionalTensorRefT):
return direct_solve(NamedCType(goal.name, optionalTensor_ctype))
# Note [translation from C++ reference to value types]
# The below cases are all for when we have an argument with a reference type,
# and a corresponding goal with a value type.
# These are needed when we populate the inputs to a lambda capture and we need
# to guarantee the lifetime of each captured argument.
# We guard it with an explicit kwarg because converting to a value type is expensive
        # (e.g., O(n) to convert from IntArrayRef to std::vector<int64_t>),
# so the caller of translate() should be explicit that they need it.
if allow_expensive_conversions:
if goal.type == VectorCType(BaseCType(longT)):
intArrayRef_ctype = NamedCType(goal.name, BaseCType(intArrayRefT))
argname = direct_solve(intArrayRef_ctype)
return f"{argname}.vec()"
if goal.type == VectorCType(BaseCType(SymIntT)):
symIntArrayRef_ctype = NamedCType(goal.name, BaseCType(symIntArrayRefT))
argname = direct_solve(symIntArrayRef_ctype)
return f"{argname}.vec()"
elif goal.type == OptionalCType(VectorCType(BaseCType(longT))):
optionalIntArrayRef_ctype = NamedCType(
goal.name, BaseCType(optionalIntArrayRefT)
)
argname = direct_solve(optionalIntArrayRef_ctype)
return f"{argname}.has_value() ? c10::make_optional({argname}->vec()) : c10::nullopt"
elif goal.type == OptionalCType(BaseCType(scalarT)):
optionalScalarRef_ctype = NamedCType(
goal.name, BaseCType(optionalScalarRefT)
)
argname = direct_solve(optionalScalarRef_ctype)
return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
            elif goal.type == OptionalCType(BaseCType(tensorT)):
optionalTensorRef_ctype = NamedCType(
goal.name, BaseCType(optionalTensorRefT)
)
argname = direct_solve(optionalTensorRef_ctype)
return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
# Technically, we also need to handle cases of C++ containers holding reference types.
# But there currently aren't any ops that require lambda capture codegen
        # with arguments like std::vector<IntArrayRef>.
# If that changes, we'll have to add the translation here.
# We allow const casting on tensors, since const-correctness is a bit broken for at::Tensor.
# We could probably generalize this to non-tensor types too.
if goal.type == MutRefCType(BaseCType(tensorT)):
const_ref_tensor_ctype = NamedCType(
goal.name, ConstRefCType(BaseCType(tensorT))
)
argname = direct_solve(const_ref_tensor_ctype)
return f"const_cast<Tensor&>({argname})"
unsat(goal)
return [Expr(solve(g, direct=False), g) for g in goal_ctypes]
| pytorch-master | torchgen/api/translate.py |
from dataclasses import dataclass
from typing import List, Optional
import torchgen.api.types as api_types
from torchgen.api import cpp, structured
from torchgen.api.types import (
ArgName,
BaseCppType,
BaseCType,
Binding,
ConstRefCType,
CType,
NamedCType,
scalarT,
)
from torchgen.model import (
Argument,
BaseTy,
BaseType,
DispatchKey,
FunctionSchema,
NativeFunctionsGroup,
Type,
)
def schema_kernel_name(func: FunctionSchema, dispatch_key: DispatchKey) -> str:
assert func.is_out_fn(), "ufunc.kernel_name should only be invoked on out schemas"
return f"ufunc_{func.name.name}_{dispatch_key}"
def kernel_name(g: NativeFunctionsGroup, dispatch_key: DispatchKey) -> str:
return schema_kernel_name(g.out.func, dispatch_key)
# Tensors are omitted (as they are stored in TensorIterator), everything else is
# passed along (technically, we can pass tensors along too, it just wastes
# argument registers)
#
# NB: used for CPU only
def dispatchstub_type(t: Type, *, binds: ArgName) -> Optional[NamedCType]:
r = cpp.valuetype_type(t, binds=binds)
if r is not None:
return r
if t == BaseType(BaseTy.Scalar):
return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
elif t == BaseType(BaseTy.Tensor):
return None
else:
raise AssertionError(f"unrecognized type {repr(t)}")
def opmath_type(scalar_t: BaseCppType) -> BaseCppType:
if scalar_t == api_types.scalar_t:
return api_types.opmath_t
raise NotImplementedError
# NB: Tensors in constructor are stored in opmath_t, not scalar_t
# because Tensor in constructor = it's a scalar tensor partially applied =
# it can be higher precision and we want to compute in that higher precision
#
# NB: CUDA only
def ufunctor_ctor_type(t: Type, *, binds: ArgName, scalar_t: BaseCppType) -> NamedCType:
r = cpp.valuetype_type(t, binds=binds)
if r is not None:
return r
if t == BaseType(BaseTy.Scalar):
return NamedCType(binds, BaseCType(opmath_type(scalar_t)))
elif t == BaseType(BaseTy.Tensor):
return NamedCType(binds, BaseCType(opmath_type(scalar_t)))
else:
raise AssertionError(f"unrecognized type {repr(t)}")
# Only Tensors ever get passed directly to operator()
#
# NB: CUDA only
# (Actually, this works for CPU too)
def ufunctor_apply_type(
t: Type, *, binds: ArgName, scalar_t: BaseCppType
) -> NamedCType:
if t == BaseType(BaseTy.Tensor):
return NamedCType(binds, BaseCType(scalar_t))
else:
raise AssertionError(f"unrecognized type {repr(t)}")
# The actual ufunc template function the user writes. Everything here
# is done in the computation type. compute_t is opmath_t in CUDA and scalar_t
# in CPU
def ufunc_type(t: Type, *, binds: ArgName, compute_t: CType) -> NamedCType:
r = cpp.valuetype_type(t, binds=binds)
if r is not None:
return r
if t == BaseType(BaseTy.Scalar):
return NamedCType(binds, compute_t)
elif t == BaseType(BaseTy.Tensor):
return NamedCType(binds, compute_t)
else:
raise AssertionError(f"unrecognized type {repr(t)}")
def ufunctor_ctor_argument(a: Argument, scalar_t: BaseCppType) -> Binding:
return Binding(
nctype=ufunctor_ctor_type(a.type, binds=a.name, scalar_t=scalar_t),
name=a.name,
default=None,
argument=a,
)
def ufunctor_apply_argument(a: Argument, scalar_t: BaseCppType) -> Binding:
return Binding(
nctype=ufunctor_apply_type(a.type, binds=a.name, scalar_t=scalar_t),
name=a.name,
default=None,
argument=a,
)
def ufunc_argument(a: Argument, compute_t: CType) -> Binding:
return Binding(
nctype=ufunc_type(a.type, binds=a.name, compute_t=compute_t),
name=a.name,
default=None,
argument=a,
)
@dataclass(frozen=True)
class UfunctorBindings:
ctor: List[Binding]
apply: List[Binding]
# ufunctors are a CUDA-only concept representing functors that take some of
# their arguments on a host-side constructor, and the rest in the device-side
# apply. E.g.,
#
# template <typename scalar_t>
# struct CUDAFunctorOnSelf_add {
# using opmath_t = at::opmath_type<scalar_t>;
# opmath_t other_;
# opmath_t alpha_;
# CUDAFunctorOnSelf_add(opmath_t other, opmath_t alpha) : other_(other), alpha_(alpha) {}
# __device__ scalar_t operator()(scalar_t self) {
# return ufunc::add(static_cast<opmath_t>(self), other_, alpha_);
# }
# };
#
# The ctor refers to the constructor CUDAFunctorOnSelf_add, while apply refers
# to the operator() definition
def ufunctor_arguments(
g: NativeFunctionsGroup, *, scalar_tensor_idx: Optional[int], scalar_t: BaseCppType
) -> UfunctorBindings:
ctor = []
apply = []
for a in g.functional.func.arguments.flat_non_out:
if a.type.is_tensor_like():
if scalar_tensor_idx == 0:
# put it in the ctor anyway
ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
scalar_tensor_idx = None
else:
if scalar_tensor_idx is not None:
scalar_tensor_idx -= 1
apply.append(ufunctor_apply_argument(a, scalar_t=scalar_t))
else:
ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
assert scalar_tensor_idx is None
return UfunctorBindings(ctor=ctor, apply=apply)
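# As a hedged example (assuming the canonical functional schema
# "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"):
# ufunctor_arguments(g, scalar_tensor_idx=1, scalar_t=api_types.scalar_t)
# places `other` and `alpha` in .ctor and `self` in .apply, matching the
# CUDAFunctorOnSelf_add layout sketched above.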
# ufuncs are the inner loop template functions that you wrote in ufunc/add.h
# which do the actual computation in question. E.g.,
#
# template <typename T>
# C10_HOST_DEVICE T add(T self, T other, T alpha) __ubsan_ignore_undefined__ {
# return self + alpha * other;
# }
#
# In this file, we refer to T as compute_t which is bound by caller
def ufunc_arguments(g: NativeFunctionsGroup, *, compute_t: CType) -> List[Binding]:
return [
ufunc_argument(a, compute_t=compute_t)
for a in g.functional.func.arguments.flat_non_out
]
# Stubs are the DispatchStub trampolines that CPU kernels use to get to their
# vectorized versions. E.g.,
#
# using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
# DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
def stub_arguments(g: NativeFunctionsGroup) -> List[Binding]:
# stubs drop all tensor arguments (they are implicit in the TensorIterator
# argument and keep everything else)
return [
r
for a in g.out.func.arguments.flat_non_out
if not a.type.is_tensor_like()
for r in structured.argument(a)
]
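# Continuing the add example (illustrative only): the out schema's non-out
# arguments are (self, other, alpha); dropping the tensor-like ones leaves just
# `alpha`, which lines up with the `const Scalar& alpha` parameter of
# structured_binary_fn_alpha above.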
| pytorch-master | torchgen/api/ufunc.py |
pytorch-master | torchgen/api/__init__.py |
|
from typing import List, Optional, Sequence, Union
from torchgen import local
from torchgen.api import cpp
from torchgen.api.types import (
ArgName,
BaseCType,
Binding,
boolT,
ConstRefCType,
CType,
deviceT,
layoutT,
ListCType,
MutRefCType,
NamedCType,
OptionalCType,
scalarT,
scalarTypeT,
tensorT,
)
from torchgen.model import (
Argument,
FunctionSchema,
Return,
SelfArgument,
TensorOptionsArguments,
Type,
)
from torchgen.utils import assert_never
# This file describes the translation of JIT schema to the native functions API.
# This looks a lot like the C++ API (which makes historical sense, because the
# idea was you wrote native functions to implement functions in the C++ API),
# but over time we have evolved the C++ API without actually changing our
# native:: kernels. The intention is to make native API and dispatcher API
# line up as closely as possible, since this results in the least overhead
# (no translation is needed from dispatcher API to native API).
def name(func: FunctionSchema) -> str:
name = str(func.name.name)
# TODO: delete this!
if func.is_out_fn():
name += "_out"
if func.name.overload_name:
name += f"_{func.name.overload_name}"
return name
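# A hedged example of what this produces (the schema string below is assumed
# purely for illustration):
#
#   name(FunctionSchema.parse(
#       "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"))
#   # -> "add_Tensor"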
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
if str(t) == "Tensor?":
tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT))
if mutable and not local.use_const_ref_for_mutable_tensors():
return NamedCType(binds, MutRefCType(tensor_type))
else:
return NamedCType(binds, ConstRefCType(tensor_type))
elif str(t) == "Tensor?[]":
return NamedCType(
binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
)
elif str(t) == "Scalar":
return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
elif str(t) == "Scalar?":
return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
return cpp.argumenttype_type(t, mutable=mutable, binds=binds)
def returns_type(rs: Sequence[Return]) -> CType:
return cpp.returns_type(rs)
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
def argument(
a: Union[Argument, SelfArgument, TensorOptionsArguments], *, is_out: bool
) -> List[Binding]:
# Ideally, we NEVER default native functions. However, there are a number
# of functions that call native:: directly and rely on the defaulting
# existing. So for BC, we generate defaults for non-out variants (but not
# for out variants, where it is impossible to generate an appropriate
# default)
should_default = not is_out
if isinstance(a, Argument):
default: Optional[str] = None
if should_default and a.default is not None:
default = cpp.default_expr(a.default, a.type)
return [
Binding(
nctype=argument_type(a, binds=a.name),
name=a.name,
default=default,
argument=a,
)
]
elif isinstance(a, SelfArgument):
# Erase SelfArgument from the distinction
return argument(a.argument, is_out=is_out)
elif isinstance(a, TensorOptionsArguments):
default = None
if should_default:
default = "{}"
# TODO: Not sure why the arguments assigned here are for
# TensorOptionsArguments and not the constituent pieces. It seems
# to matter
return [
Binding(
nctype=NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))),
name="dtype",
default=default,
argument=a,
),
Binding(
nctype=NamedCType("layout", OptionalCType(BaseCType(layoutT))),
name="layout",
default=default,
argument=a,
),
Binding(
nctype=NamedCType("device", OptionalCType(BaseCType(deviceT))),
name="device",
default=default,
argument=a,
),
Binding(
nctype=NamedCType("pin_memory", OptionalCType(BaseCType(boolT))),
name="pin_memory",
default=default,
argument=a,
),
]
else:
assert_never(a)
def arguments(func: FunctionSchema) -> List[Binding]:
args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
args.extend(func.arguments.non_out)
args.extend(func.arguments.out)
return [r for arg in args for r in argument(arg, is_out=func.is_out_fn())]
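# A hedged illustration of the ordering: for an out= schema such as
# "add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)",
# arguments() yields bindings in the order [self, other, alpha, out]. Out
# arguments come last here, whereas the convenience C++ API (cpp.arguments with
# faithful=False) moves them to the front.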
| pytorch-master | torchgen/api/native.py |
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Optional, Sequence, Set, TypeVar, Union
from torchgen.model import (
Argument,
BackendIndex,
BaseTy,
FunctionSchema,
NativeFunction,
NativeFunctionsGroup,
NativeFunctionsViewGroup,
ScalarType,
SelfArgument,
TensorOptionsArguments,
)
_T = TypeVar("_T")
# An ArgName is just the str name of the argument in schema;
# but in some special circumstances, we may add a little extra
# context. The Enum SpecialArgName covers all of these cases;
# grep for their construction sites to see when they can occur.
SpecialArgName = Enum("SpecialArgName", ("possibly_redundant_memory_format",))
ArgName = Union[str, SpecialArgName]
# This class shouldn't be created directly; instead, use/create one of the singletons below.
@dataclass(frozen=True)
class BaseCppType:
ns: Optional[str]
name: str
def __str__(self) -> str:
if self.ns is None or self.ns == "":
return self.name
return f"{self.ns}::{self.name}"
# The set of all non-templated, valid, fully-qualified names of C++ types that are used in the codegen.
# Templated types get their own dataclass, mainly to make namespace parsing easier.
byteT = BaseCppType("", "uint8_t")
charT = BaseCppType("", "int8_t")
shortT = BaseCppType("", "int16_t")
# It would be more symmetric for this to be called intT, but it is easy to mix
# this up with JIT int (which is int64_t in C++), so we intentionally don't
# define intT to make it obvious when you've stuffed it up
int32T = BaseCppType("", "int32_t")
longT = BaseCppType("", "int64_t")
halfT = BaseCppType("at", "Half")
doubleT = BaseCppType("", "double")
floatT = BaseCppType("", "float")
complexHalfT = BaseCppType(
"c10", "complex<c10::Half>"
) # stuffing template param here is an abuse
complexFloatT = BaseCppType("c10", "complex<float>")
complexDoubleT = BaseCppType("c10", "complex<double>")
boolT = BaseCppType("", "bool")
bfloat16T = BaseCppType("at", "BFloat16")
voidT = BaseCppType("", "void")
stringT = BaseCppType("c10", "string_view")
generatorT = BaseCppType("at", "Generator")
scalarTypeT = BaseCppType("at", "ScalarType")
tensorT = BaseCppType("at", "Tensor")
optionalTensorRefT = BaseCppType("at", "OptionalTensorRef")
tensorListT = BaseCppType("at", "TensorList")
iTensorListRefT = BaseCppType("at", "ITensorListRef")
iOptTensorListRefT = BaseCppType("at", "IOptTensorListRef")
dimnameT = BaseCppType("at", "Dimname")
dimnameListT = BaseCppType("at", "DimnameList")
dimVectorT = BaseCppType("at", "DimVector")
layoutT = BaseCppType("at", "Layout")
deviceT = BaseCppType("at", "Device")
scalarT = BaseCppType("at", "Scalar")
optionalScalarRefT = BaseCppType("at", "OptionalScalarRef")
memoryFormatT = BaseCppType("at", "MemoryFormat")
qschemeT = BaseCppType("at", "QScheme")
storageT = BaseCppType("at", "Storage")
streamT = BaseCppType("at", "Stream")
intArrayRefT = BaseCppType("at", "IntArrayRef")
optionalIntArrayRefT = BaseCppType("at", "OptionalIntArrayRef")
tensorOptionsT = BaseCppType("at", "TensorOptions")
typeAndSizeT = BaseCppType("torch::autograd::generated", "TypeAndSize")
tensorGeometryT = BaseCppType("at", "TensorGeometry")
SymIntT = BaseCppType("c10", "SymInt")
symIntArrayRefT = BaseCppType("c10", "SymIntArrayRef")
# Types representing template parameters. Technically, we probably shouldn't
# represent them this way in codegen, but it was pretty convenient.
scalar_t = BaseCppType("", "scalar_t")
opmath_t = BaseCppType("", "opmath_t")
ScalarTypeToCppMapping: Dict[ScalarType, BaseCppType] = {
ScalarType.Byte: byteT,
ScalarType.Char: charT,
ScalarType.Short: shortT,
ScalarType.Int: int32T,
ScalarType.Long: longT,
ScalarType.Half: halfT,
ScalarType.Float: floatT,
ScalarType.Double: doubleT,
ScalarType.ComplexHalf: complexHalfT,
ScalarType.ComplexFloat: complexFloatT,
ScalarType.ComplexDouble: complexDoubleT,
ScalarType.Bool: boolT,
ScalarType.BFloat16: bfloat16T,
}
BaseTypeToCppMapping: Dict[BaseTy, BaseCppType] = {
BaseTy.int: longT,
BaseTy.float: doubleT,
BaseTy.bool: boolT,
BaseTy.str: stringT,
BaseTy.Generator: generatorT,
BaseTy.ScalarType: scalarTypeT,
BaseTy.Tensor: tensorT,
BaseTy.Dimname: dimnameT,
BaseTy.DimVector: dimVectorT,
BaseTy.Layout: layoutT,
BaseTy.Device: deviceT,
BaseTy.Scalar: scalarT,
BaseTy.MemoryFormat: memoryFormatT,
BaseTy.QScheme: qschemeT,
BaseTy.Storage: storageT,
BaseTy.Stream: streamT,
BaseTy.SymInt: SymIntT,
}
# CTypes encode C++ type structure as needed for translation.
@dataclass(frozen=True)
class BaseCType:
type: BaseCppType
def cpp_type(self, *, strip_ref: bool = False) -> str:
return str(self.type)
# For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
# TODO: Kill this when we eventually remove it!
def cpp_type_registration_declarations(self) -> str:
return str(self.type).replace("at::", "")
def remove_const_ref(self) -> "CType":
return self
@dataclass(frozen=True)
class ConstRefCType:
elem: "CType"
def cpp_type(self, *, strip_ref: bool = False) -> str:
if strip_ref:
return self.elem.cpp_type(strip_ref=strip_ref)
return f"const {self.elem.cpp_type()} &"
def cpp_type_registration_declarations(self) -> str:
return f"const {self.elem.cpp_type_registration_declarations()} &"
def remove_const_ref(self) -> "CType":
return self.elem.remove_const_ref()
@dataclass(frozen=True)
class MutRefCType:
elem: "CType"
def cpp_type(self, *, strip_ref: bool = False) -> str:
if strip_ref:
return self.elem.cpp_type(strip_ref=strip_ref)
return f"{self.elem.cpp_type()} &"
def cpp_type_registration_declarations(self) -> str:
return f"{self.elem.cpp_type_registration_declarations()} &"
def remove_const_ref(self) -> "CType":
return self.elem.remove_const_ref()
@dataclass(frozen=True)
class OptionalCType:
elem: "CType"
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f"c10::optional<{self.elem.cpp_type()}>"
def cpp_type_registration_declarations(self) -> str:
return f"c10::optional<{self.elem.cpp_type_registration_declarations()}>"
def remove_const_ref(self) -> "CType":
return OptionalCType(self.elem.remove_const_ref())
@dataclass(frozen=True)
class ListCType:
elem: "CType"
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f"c10::List<{self.elem.cpp_type()}>"
def cpp_type_registration_declarations(self) -> str:
return f"c10::List<{self.elem.cpp_type_registration_declarations()}>"
def remove_const_ref(self) -> "CType":
return ListCType(self.elem.remove_const_ref())
@dataclass(frozen=True)
class ArrayRefCType:
elem: "CType"
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f"at::ArrayRef<{self.elem.cpp_type()}>"
def cpp_type_registration_declarations(self) -> str:
return f"ArrayRef<{self.elem.cpp_type_registration_declarations()}>"
def remove_const_ref(self) -> "CType":
return ArrayRefCType(self.elem.remove_const_ref())
@dataclass(frozen=True)
class VectorCType:
elem: "CType"
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f"::std::vector<{self.elem.cpp_type()}>"
def cpp_type_registration_declarations(self) -> str:
return f"::std::vector<{self.elem.cpp_type_registration_declarations()}>"
def remove_const_ref(self) -> "CType":
return VectorCType(self.elem.remove_const_ref())
@dataclass(frozen=True)
class ArrayCType:
elem: "CType"
size: int
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f"::std::array<{self.elem.cpp_type()},{self.size}>"
def cpp_type_registration_declarations(self) -> str:
return f"::std::array<{self.elem.cpp_type_registration_declarations()},{self.size}>"
def remove_const_ref(self) -> "CType":
return ArrayCType(self.elem.remove_const_ref(), self.size)
@dataclass(frozen=True)
class TupleCType:
elems: List["CType"]
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f'::std::tuple<{",".join([e.cpp_type() for e in self.elems])}>'
def cpp_type_registration_declarations(self) -> str:
return f'::std::tuple<{",".join([e.cpp_type_registration_declarations() for e in self.elems])}>'
def remove_const_ref(self) -> "CType":
return TupleCType([e.remove_const_ref() for e in self.elems])
@dataclass(frozen=True)
class VectorizedCType:
# This template is explicitly specialized, so the only valid
# elems are those we have specializations for (e.g., float, double, ...)
# scalar_t is also a common argument here (when we are codegen in
# a templated context)
elem: BaseCType
def cpp_type(self, *, strip_ref: bool = False) -> str:
return f"at::vec::Vectorized<{self.elem.cpp_type()}>"
def cpp_type_registration_declarations(self) -> str:
raise NotImplementedError
def remove_const_ref(self) -> "CType":
return self
CType = Union[
BaseCType,
OptionalCType,
ConstRefCType,
MutRefCType,
ListCType,
ArrayRefCType,
ArrayCType,
VectorCType,
TupleCType,
VectorizedCType,
]
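# A quick sanity sketch of how nested CTypes render (illustrative only):
#
#   ConstRefCType(OptionalCType(BaseCType(tensorT))).cpp_type()
#   # -> "const c10::optional<at::Tensor> &"
#   VectorCType(BaseCType(longT)).cpp_type()
#   # -> "::std::vector<int64_t>"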
# A NamedCType is short for Named C++ semantic type. A NamedCType represents a C++ type, plus
# semantic information about what it represents. For example, consider the
# argument "bool pin_memory"; its normal C++ type is "bool", but its C++
# semantic type also keeps track that this represents a "pin_memory"; you can't
# just use a random other boolean in a context where you need a "pin_memory"!
#
@dataclass(frozen=True)
class NamedCType:
name: ArgName
type: CType
def cpp_type(self, *, strip_ref: bool = False) -> str:
return self.type.cpp_type(strip_ref=strip_ref)
# For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
# TODO: Kill this when we eventually remove it!
def cpp_type_registration_declarations(self) -> str:
return self.type.cpp_type_registration_declarations()
def remove_const_ref(self) -> "NamedCType":
return NamedCType(self.name, self.type.remove_const_ref())
def with_name(self, name: str) -> "NamedCType":
return NamedCType(name, self.type)
# A binding represents any C++ binding site for a formal parameter.
# We don't distinguish between binding sites for different APIs;
# instead, all of the important distinctions are encoded in CType,
# which you can use to figure out if a given Binding is appropriate
# for use in another context. (See torchgen.api.translate)
@dataclass(frozen=True)
class Binding:
name: str
nctype: NamedCType
argument: Union[Argument, TensorOptionsArguments, SelfArgument]
# TODO: maybe don't represent default here
default: Optional[str] = None
def rename(self, name: str) -> "Binding":
return Binding(
name=name,
nctype=self.nctype,
argument=self.argument,
default=self.default,
)
@property
def type(self) -> str:
return self.nctype.cpp_type()
def no_default(self) -> "Binding":
return Binding(
name=self.name,
nctype=self.nctype,
default=None,
argument=self.argument,
)
def decl(self, *, func_ptr_cast: bool = False) -> str:
mb_default = ""
if self.default is not None:
mb_default = f"={self.default}"
# casting only needs to know the type
if func_ptr_cast:
return f"{self.type}"
else:
return f"{self.type} {self.name}{mb_default}"
# For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
# TODO: Kill this when we eventually remove it!
def decl_registration_declarations(self) -> str:
type_s = self.nctype.cpp_type_registration_declarations()
mb_default = ""
if self.default is not None:
mb_default = f"={self.default}"
return f"{type_s} {self.name}{mb_default}"
def defn(self) -> str:
return f"{self.type} {self.name}"
def with_name(self, name: str) -> "Binding":
return Binding(
name=name, nctype=self.nctype, argument=self.argument, default=self.default
)
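# For instance (a hypothetical binding, shown only for exposition): a Binding
# whose nctype is NamedCType("alpha", ConstRefCType(BaseCType(scalarT))) and
# whose default is "1" renders as
#
#   decl() -> "const at::Scalar & alpha=1"
#   defn() -> "const at::Scalar & alpha"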
# An Expr is a C++ expression. It has a C++ string representing its syntax,
# as well as a CType saying what it provides.
@dataclass(frozen=True)
class Expr:
expr: str
type: NamedCType
# A CppSignature represents a single overload in the C++ API. For
# any given function schema, there may be multiple CppSignatures
# corresponding to it, based on how we desugar to C++. See also
# CppSignatureGroup.
@dataclass(frozen=True)
class CppSignature:
# The schema this signature is derived from
func: FunctionSchema
# Is this a C++ signature for a method, i.e. Tensor::my_op(...)?
method: bool
# Is this a faithful C++ signature (i.e. following the JIT schema) or a convenience API
# (i.e. with a potential TensorOptions argument and out arguments in the front)
faithful: bool
# The set of C++ arguments which should not have defaults applied to them
cpp_no_default_args: Set[str]
# Is this a fallback C++ binding? Fallback bindings are enabled by
# manual_cpp_binding: True and are alternate, non-public API that
# lets manual C++ binding implementors access the binding that would
# have been automatically generated
fallback_binding: bool = False
# Return the unpacked argument structure of this signature,
# discarding information about which arguments are semantically
# related to each other.
def arguments(self) -> Sequence[Binding]:
return cpp.arguments(
self.func.arguments,
faithful=self.faithful,
method=self.method,
cpp_no_default_args=self.cpp_no_default_args,
)
def name(self) -> str:
n = cpp.name(self.func, faithful_name_for_out_overloads=self.faithful)
if self.fallback_binding:
n = f"__dispatch_{n}"
return n
# Render the C++ declaration for this signature
def decl(
self,
*,
name: Optional[str] = None,
prefix: str = "",
is_redispatching_fn: bool = False,
) -> str:
returns_type = cpp.returns_type(self.func.returns).cpp_type()
cpp_args = [a.decl() for a in self.arguments()]
if is_redispatching_fn:
cpp_args = ["c10::DispatchKeySet dispatchKeySet"] + cpp_args
cpp_args_str = ", ".join(cpp_args)
if name is None:
name = prefix + self.name()
return f"{returns_type} {name}({cpp_args_str})"
# Render the C++ definition for this signature, not including
# the body (with curly braces)
def defn(
self,
*,
name: Optional[str] = None,
prefix: str = "",
is_redispatching_fn: bool = False,
) -> str:
returns_type = cpp.returns_type(self.func.returns).cpp_type()
cpp_args = [a.defn() for a in self.arguments()]
if is_redispatching_fn:
cpp_args = ["c10::DispatchKeySet dispatchKeySet"] + cpp_args
cpp_args_str = ", ".join(cpp_args)
if name is None:
name = prefix + self.name()
return f"{returns_type} {name}({cpp_args_str})"
def ptr_type(self) -> str:
args_types_str = ", ".join(a.type for a in self.arguments())
return f"{cpp.returns_type(self.func.returns).cpp_type()} (*)({args_types_str})"
# Return the C++ function type, e.g., something like int(bool)
def type(self) -> str:
args_types_str = ", ".join(a.type for a in self.arguments())
return f"{cpp.returns_type(self.func.returns).cpp_type()} ({args_types_str})"
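# As a hedged example, the convenience (non-faithful) signature of
# "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor" would
# render decl() roughly as
#
#   at::Tensor add(const at::Tensor & self, const at::Tensor & other,
#                  const at::Scalar & alpha=1)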
# Represents group of all CppSignatures associated with a
# FunctionSchema. Right now, that's the regular, user-visible
# signature, as well as a "faithful" signature which doesn't
# have grouping.
@dataclass(frozen=True)
class CppSignatureGroup:
func: FunctionSchema
signature: CppSignature
faithful_signature: Optional[CppSignature]
def most_faithful_signature(self) -> CppSignature:
if self.faithful_signature:
return self.faithful_signature
else:
return self.signature
@staticmethod
def from_native_function(
f: NativeFunction, *, method: bool, fallback_binding: bool = False
) -> "CppSignatureGroup":
func = f.func
faithful_signature: Optional[CppSignature]
if func.arguments.tensor_options is not None or len(func.arguments.out) > 0:
faithful_signature = CppSignature(
func=func,
faithful=True,
method=method,
fallback_binding=fallback_binding,
cpp_no_default_args=f.cpp_no_default_args,
)
else:
faithful_signature = None
signature = CppSignature(
func=func,
faithful=False,
method=method,
fallback_binding=fallback_binding,
cpp_no_default_args=f.cpp_no_default_args,
)
return CppSignatureGroup(
func=func,
signature=signature,
faithful_signature=faithful_signature,
)
@dataclass(frozen=True)
class DispatcherSignature:
# The schema this signature is derived from
func: FunctionSchema
# Allows you to prepend an arbitrary prefix to the signature name.
# This is useful for parts of the codegen that generate wrappers around kernels,
# and need to avoid naming collisions.
prefix: str = ""
def arguments(self) -> List[Binding]:
return dispatcher.arguments(self.func)
def name(self) -> str:
return self.prefix + dispatcher.name(self.func)
def decl(self, name: Optional[str] = None) -> str:
args_str = ", ".join(a.decl() for a in self.arguments())
if name is None:
name = self.name()
return f"{self.returns_type().cpp_type()} {name}({args_str})"
def defn(
self, name: Optional[str] = None, *, is_redispatching_fn: bool = False
) -> str:
args = [a.defn() for a in self.arguments()]
if is_redispatching_fn:
args = ["c10::DispatchKeySet dispatchKeySet"] + args
args_str = ", ".join(args)
if name is None:
name = self.name()
return f"{self.returns_type().cpp_type()} {name}({args_str})"
def exprs(self) -> List[Expr]:
return [Expr(a.name, a.nctype) for a in self.arguments()]
def returns_type(self) -> CType:
return dispatcher.returns_type(self.func.returns)
def ptr_type(self) -> str:
dispatcher_args_types_str = ", ".join(a.type for a in self.arguments())
return f"{self.returns_type().cpp_type()} (*)({dispatcher_args_types_str})"
# Return the C++ function type, e.g., something like int(bool)
def type(self) -> str:
dispatcher_args_types_str = ", ".join(a.type for a in self.arguments())
return f"{self.returns_type().cpp_type()} ({dispatcher_args_types_str})"
@staticmethod
def from_schema(func: FunctionSchema, *, prefix: str = "") -> "DispatcherSignature":
return DispatcherSignature(func, prefix)
@dataclass(frozen=True)
class NativeSignature:
# The schema this signature is derived from
func: FunctionSchema
prefix: str = ""
def name(self) -> str:
return self.prefix + native.name(self.func)
def decl(self, name: Optional[str] = None) -> str:
args_str = ", ".join(a.decl() for a in self.arguments())
if name is None:
name = self.name()
return f"{native.returns_type(self.func.returns).cpp_type()} {name}({args_str})"
def defn(self, name: Optional[str] = None) -> str:
args_str = ", ".join(a.defn() for a in self.arguments())
if name is None:
name = self.name()
return f"{native.returns_type(self.func.returns).cpp_type()} {name}({args_str})"
def ptr_type(self) -> str:
# don't include defaults in type signature!
args_str = ", ".join(a.defn() for a in self.arguments())
return f"{native.returns_type(self.func.returns).cpp_type()} (*)({args_str})"
def arguments(self) -> List[Binding]:
return native.arguments(self.func)
def returns_type(self) -> CType:
return native.returns_type(self.func.returns)
def dispatcher_exprs(self) -> List[Expr]:
return translate.translate(
self.arguments(), dispatcher.arguments(self.func), method=False
)
@dataclass(frozen=True)
class ViewInverseSignature:
g: NativeFunctionsViewGroup
def name(self) -> str:
assert self.g.view_copy is not None
return functionalization.name(self.g, is_reverse=True, include_namespace=False)
def decl(self) -> str:
assert self.g.view_copy is not None
return_type = functionalization.returns_type(self.g.view_copy.func)
decls = [
a.decl()
for a in functionalization.inner_arguments(
self.g.view_copy.func, is_reverse=True
)
]
return f"static {return_type.cpp_type()} {self.name()}({', '.join(decls)});"
@dataclass(frozen=True)
class FunctionalizationLambda:
g: NativeFunctionsViewGroup
# are we generating the forward lambda or the reverse lambda?
is_reverse: bool
def captures(self) -> List[Expr]:
# The lambda lives inside of a kernel following the dispatcher API, so its outer context is the dispatcher arguments
# We also need to read the "reapply views" TLS at the time that the functionalization kernel was executed,
# and plumb it into the lambda.
outer_ctx = dispatcher.arguments(self.g.view.func) + [
functionalization.reapply_views_binding
]
capture_bindings = functionalization.capture_arguments(
self.g.view.func, is_reverse=self.is_reverse
)
# allow_expensive_conversions is set because we want to convert
# some reference types (IntArrayRef) to value types (vector<int64_t>).
capture_exprs = translate.translate(
outer_ctx, capture_bindings, method=False, allow_expensive_conversions=True
)
return capture_exprs
def decl(self) -> str:
return_type = functionalization.returns_type(self.g.view.func)
capture_str = ", ".join(
f"{val.type.name} = {val.expr}" for val in self.captures()
)
decls = [
a.decl()
for a in functionalization.outer_arguments(is_reverse=self.is_reverse)
]
return f"[{capture_str}]({', '.join(decls)}) -> {return_type.cpp_type()}"
def inner_call(self, *, reapply_views: Optional[bool] = None) -> str:
inner_call_name = functionalization.name(
self.g,
is_reverse=self.is_reverse,
include_namespace=True,
reapply_views=reapply_views,
)
arg_ctx = functionalization.outer_arguments(is_reverse=self.is_reverse)
capture_ctx = functionalization.capture_arguments(
self.g.view.func, is_reverse=self.is_reverse
)
full_ctx = arg_ctx + capture_ctx
assert self.g.view_copy is not None
call_bindings = functionalization.inner_arguments(
self.g.view_copy.func, is_reverse=self.is_reverse
)
maybe_index = functionalization.inner_call_index(self.g.view_copy.func)
call_exprs = [
e.expr for e in translate.translate(full_ctx, call_bindings, method=False)
]
if not self.is_reverse and maybe_index is not None:
return f'{inner_call_name}({", ".join(call_exprs)})[{maybe_index.name}];'
else:
return f'{inner_call_name}({", ".join(call_exprs)});'
@staticmethod
def from_func(
g: NativeFunctionsViewGroup, *, is_reverse: bool
) -> "FunctionalizationLambda":
return FunctionalizationLambda(g, is_reverse)
@dataclass(frozen=True)
class StructuredImplSignature:
g: NativeFunctionsGroup
name: str
def defn(self, name: Optional[str] = None) -> str:
args_str = ", ".join(a.defn() for a in self.arguments())
return f"TORCH_IMPL_FUNC({self.name})({args_str})"
def arguments(self) -> List[Binding]:
return structured.impl_arguments(self.g)
# Helper functions
def kernel_signature(
f: NativeFunction, backend_index: BackendIndex, *, prefix: str = ""
) -> Union["NativeSignature", "DispatcherSignature"]:
# Note [External Backends Follow Dispatcher API]
# Kernel signatures for in-tree backends follow the "native" API,
# while kernels for out-of-tree backends follow the dispatcher API.
# See the comments in `native.py` for details, but historically there have been
# some small differences in schema convention between them and the Dispatcher API.
    # Any differences that require translating between the two will result in a runtime cost,
# so we'd like to keep the differences as small as possible.
# With external backends, we'd like to enforce that they write their kernels with schemas
# that match the Dispatcher API directly, if they can.
if backend_index.external:
return DispatcherSignature.from_schema(f.func, prefix=prefix)
else:
return NativeSignature(f.func, prefix)
# Functions only, no types
from torchgen.api import (
cpp,
dispatcher,
functionalization,
native,
structured,
translate,
)
| pytorch-master | torchgen/api/types.py |
from typing import List, Optional, Sequence, Set, Union
from torchgen import local
from torchgen.api.types import (
ArgName,
ArrayCType,
ArrayRefCType,
BaseCType,
BaseTypeToCppMapping,
Binding,
boolT,
ConstRefCType,
CType,
dimnameListT,
intArrayRefT,
ListCType,
longT,
MutRefCType,
NamedCType,
OptionalCType,
optionalIntArrayRefT,
scalarT,
SpecialArgName,
symIntArrayRefT,
SymIntT,
tensorListT,
tensorOptionsT,
tensorT,
TupleCType,
VectorCType,
voidT,
)
from torchgen.model import (
Argument,
Arguments,
BaseTy,
BaseType,
FunctionSchema,
ListType,
NativeFunction,
OptionalType,
Return,
SelfArgument,
TensorOptionsArguments,
Type,
)
from torchgen.utils import assert_never
# This file describes the translation of JIT schema to the public C++
# API, which is what people use when they call functions like at::add.
#
# Prominent characteristics of the C++ API:
#
# - dtype, layout, device and pin_memory are collected into
# a single C++ type TensorOptions (the native functions API
# also has this, but tensor options is really most relevant
# for the C++ API; it makes calling kwarg factory functions
# pleasant)
#
# - defaulting lives here (in fact, the dispatcher is completely
# oblivious of defaults!)
#
# BTW: policy on name collisions: we try not to have types with
# collisions, but functions are fair game to collide
def name(func: FunctionSchema, *, faithful_name_for_out_overloads: bool = False) -> str:
name = str(func.name.name)
if func.is_symint_fn():
name += "_symint"
if func.is_out_fn():
if faithful_name_for_out_overloads:
name += "_outf"
else:
name += "_out"
return name
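# A hedged example (the schema string is assumed for illustration; add has no
# SymInt arguments, so no "_symint" suffix applies):
#
#   f = FunctionSchema.parse(
#       "add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")
#   name(f)                                        # -> "add_out"
#   name(f, faithful_name_for_out_overloads=True)  # -> "add_outf"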
# Translation of "value types" in JIT schema to C++ API type. Value
# types look the same whether they are argument types or return
# types. Returns None if the type in question is not a value type.
def valuetype_type(
t: Type, *, binds: ArgName, remove_non_owning_ref_types: bool = False
) -> Optional[NamedCType]:
if isinstance(t, BaseType):
if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar:
return None
if remove_non_owning_ref_types:
if t.name == BaseTy.str:
raise AssertionError(
"string ref->value conversion: not implemented yet"
)
# All other BaseType currently map directly to BaseCppTypes.
return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name]))
elif isinstance(t, OptionalType):
elem = valuetype_type(t.elem, binds=binds)
if elem is None:
return None
return NamedCType(binds, OptionalCType(elem.type))
elif isinstance(t, ListType):
if str(t.elem) == "bool":
assert t.size is not None
return NamedCType(binds, ArrayCType(BaseCType(boolT), t.size))
else:
return None
else:
raise AssertionError(f"unrecognized type {repr(t)}")
# Translation of types occurring in JIT arguments to a C++ argument type.
# If remove_non_owning_ref_types is set, we'll guarantee that the returned CType is not a non-owning reference type.
# For example, we'll return std::vector<int64_t> instead of IntArrayRef.
# See Note [translation from C++ reference to value types]
def argumenttype_type(
t: Type, *, mutable: bool, binds: ArgName, remove_non_owning_ref_types: bool = False
) -> NamedCType:
# If it's a value type, do the value type translation
r = valuetype_type(
t, binds=binds, remove_non_owning_ref_types=remove_non_owning_ref_types
)
if r is not None:
return r
if isinstance(t, BaseType):
if t.name == BaseTy.Tensor:
if mutable and not local.use_const_ref_for_mutable_tensors():
return NamedCType(binds, MutRefCType(BaseCType(tensorT)))
else:
return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
elif t.name == BaseTy.Scalar:
return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
else:
raise AssertionError(f"base type should have been value type {t}")
elif isinstance(t, OptionalType):
if str(t.elem) == "Tensor":
if mutable and not local.use_const_ref_for_mutable_tensors():
return NamedCType(
binds, MutRefCType(BaseCType(tensorT))
) # TODO: fix this discrepancy
else:
return NamedCType(
binds, ConstRefCType(OptionalCType(BaseCType(tensorT)))
)
elif str(t.elem) == "Scalar":
return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int":
return NamedCType(binds, BaseCType(optionalIntArrayRefT))
elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
return NamedCType(binds, OptionalCType(elem.type))
elif isinstance(t, ListType):
# TODO: remove these special cases, ArrayRef fallthrough works fine
if str(t.elem) == "int":
if remove_non_owning_ref_types:
return NamedCType(binds, VectorCType(BaseCType(longT)))
else:
return NamedCType(binds, BaseCType(intArrayRefT))
if str(t.elem) == "SymInt":
if remove_non_owning_ref_types:
return NamedCType(binds, VectorCType(BaseCType(SymIntT)))
else:
return NamedCType(binds, BaseCType(symIntArrayRefT))
elif str(t.elem) == "Tensor":
return NamedCType(binds, BaseCType(tensorListT))
elif str(t.elem) == "Scalar":
return NamedCType(binds, ArrayRefCType(BaseCType(scalarT)))
elif str(t.elem) == "Dimname":
return NamedCType(binds, BaseCType(dimnameListT))
elif str(t.elem) == "Tensor?":
return NamedCType(
binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
)
elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
return NamedCType(binds, ArrayRefCType(elem.type))
else:
raise AssertionError(f"unrecognized type {repr(t)}")
# Translate a JIT argument into its C++ type
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
# Translation of a (non-multi) return type from JIT to C++
# N.B: returntype_type returns a CType, not a NamedCType.
# This is mostly because of the mismatch between return types and return names.
# e.g. a function with a return type of 'void' has 0 return names,
# and a function with a return type of 'std::tuple' has >1 return name.
def returntype_type(t: Type, *, mutable: bool) -> CType:
# placeholder is ignored
r = valuetype_type(t, binds="__placeholder__")
if r is not None:
return r.type
if isinstance(t, BaseType):
if t.name == BaseTy.Tensor:
if mutable:
if local.use_const_ref_for_mutable_tensors():
return ConstRefCType(BaseCType(tensorT))
else:
return MutRefCType(BaseCType(tensorT))
else:
# Note [Tensor Copy Returns]
# Currently, we use "Argument.is_write" to determine
# whether or not Tensor return types should be copies or references.
# If that ever changes, take a look at other locations of this note!
return BaseCType(tensorT)
elif t.name == BaseTy.Scalar:
return BaseCType(scalarT)
elif isinstance(t, ListType):
assert (
not mutable
), "Native functions should never return a mutable tensor list. They should return void."
elem = returntype_type(t.elem, mutable=False)
assert t.size is None, f"fixed size list returns not supported: {t}"
return VectorCType(elem)
raise AssertionError(f"unrecognized return type {t}")
# Translation of a single return to its C++ type
def return_type(r: Return) -> CType:
return returntype_type(r.type, mutable=r.is_write)
# Translation of a full (possibly multi) return from JIT to its C++ type
def returns_type(rs: Sequence[Return]) -> CType:
if len(rs) == 0:
return BaseCType(voidT)
elif len(rs) == 1:
return return_type(rs[0])
else:
return TupleCType([return_type(r) for r in rs])
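# For example, a schema returning "(Tensor, Tensor)" maps to
# ::std::tuple<at::Tensor,at::Tensor>, a single Tensor return maps to
# at::Tensor, and an empty return list maps to void.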
def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]:
returns: List[str] = []
for i, r in enumerate(f.func.returns):
# If we have an inplace function, the return argument is
# implicitly named self.
# TODO: Consider incorporating this into the data model
if f.func.name.name.inplace:
assert i == 0, "illegal inplace function with multiple returns"
name = "self"
        # If this is an out function, the name is the name of the
        # corresponding out argument (r.name will get recorded
# in field_name later.)
elif f.func.is_out_fn():
name = f.func.arguments.out[i].name
# If the return argument is explicitly named...
elif r.name:
name_conflict = any(
r.name == a.name for a in f.func.schema_order_arguments()
)
if name_conflict and not f.func.is_out_fn():
name = f"{r.name}_return"
else:
name = r.name
# If there is no explicit name and no fallback name was passed in, we just name the output result,
# unless it's a multi-return, in which case it's result0,
# result1, etc (zero-indexed)
else:
name = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{i}"
returns.append(name)
return returns
JIT_TO_CPP_DEFAULT = {
"False": "false",
"True": "true",
"None": "c10::nullopt", # UGH this one is type directed
"Mean": "at::Reduction::Mean",
"[]": "{}",
"contiguous_format": "MemoryFormat::Contiguous",
"long": "at::kLong",
}
# Convert a JIT default into C++ expression representing the default
def default_expr(d: str, t: Type) -> str:
if d == "None" and str(t) == "Tensor?":
return "{}"
if isinstance(t, BaseType) and t.name is BaseTy.str:
# Schema allows single quotes but C++ needs double
if len(d) >= 2 and d[0] == "'" and d[-1] == "'":
s = ""
i = 1
while i + 1 < len(d):
if d[i] != "\\":
if d[i] == '"':
s += '\\"'
else:
s += d[i]
i += 1
else:
if d[i + 1] == "'":
s += "'"
else:
s += d[i : i + 2]
i += 2
return f'"{s}"'
if isinstance(t, OptionalType):
if d == "None":
return "c10::nullopt"
return default_expr(d, t.elem)
if isinstance(t, ListType):
if d.startswith("[") and d.endswith("]"):
return "{" + d[1:-1] + "}"
elif t.size is None:
# NOTE: Sized lists can have scalar defaults
raise ValueError(f"Expected a list default '[...]' but found: '{d}'")
return JIT_TO_CPP_DEFAULT.get(d, d)
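# A few hedged examples of the conversion (the Type values are spelled out
# only for illustration):
#
#   default_expr("True", BaseType(BaseTy.bool))                        # -> "true"
#   default_expr("[]", ListType(elem=BaseType(BaseTy.int), size=None)) # -> "{}"
#   default_expr("None", OptionalType(BaseType(BaseTy.Scalar)))        # -> "c10::nullopt"
#   default_expr("'constant'", BaseType(BaseTy.str))                   # -> '"constant"'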
# Convert an argument into its C++ API form
def argument(
a: Union[Argument, TensorOptionsArguments, SelfArgument],
*,
cpp_no_default_args: Set[str],
method: bool,
faithful: bool,
has_tensor_options: bool,
) -> List[Binding]:
def sub_argument(
a: Union[Argument, TensorOptionsArguments, SelfArgument]
) -> List[Binding]:
return argument(
a,
cpp_no_default_args=cpp_no_default_args,
method=method,
faithful=faithful,
has_tensor_options=has_tensor_options,
)
if isinstance(a, Argument):
binds: ArgName
if a.name == "memory_format" and has_tensor_options:
binds = SpecialArgName.possibly_redundant_memory_format
else:
binds = a.name
default: Optional[str] = None
if a.name not in cpp_no_default_args and a.default is not None:
default = default_expr(a.default, a.type)
return [
Binding(
nctype=argument_type(a, binds=binds),
name=a.name,
default=default,
argument=a,
)
]
elif isinstance(a, TensorOptionsArguments):
if faithful:
return (
sub_argument(a.dtype)
+ sub_argument(a.layout)
+ sub_argument(a.device)
+ sub_argument(a.pin_memory)
)
else:
default = None
# Enforced by NativeFunction.__post_init__
assert "options" not in cpp_no_default_args
if all(x.default == "None" for x in a.all()):
default = "{}"
elif a.dtype.default == "long":
default = "at::kLong" # TODO: this is wrong
return [
Binding(
nctype=NamedCType("options", BaseCType(tensorOptionsT)),
name="options",
default=default,
argument=a,
)
]
elif isinstance(a, SelfArgument):
if method:
# Caller is responsible for installing implicit this in context!
return []
else:
return sub_argument(a.argument)
else:
assert_never(a)
def arguments(
arguments: Arguments, *, faithful: bool, method: bool, cpp_no_default_args: Set[str]
) -> List[Binding]:
args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
if faithful:
args.extend(arguments.non_out)
args.extend(arguments.out)
else:
args.extend(arguments.out)
args.extend(arguments.non_out)
return [
r.no_default() if faithful else r
for a in args
for r in argument(
a,
faithful=faithful,
method=method,
has_tensor_options=arguments.tensor_options is not None,
cpp_no_default_args=cpp_no_default_args,
)
]
| pytorch-master | torchgen/api/cpp.py |
from typing import List, Tuple
from torchgen.api import cpp
from torchgen.api.types import Binding, CppSignatureGroup, CType
from torchgen.model import (
Argument,
BaseTy,
BaseType,
ListType,
NativeFunction,
OptionalType,
Type,
)
# This file generates the code for unboxing wrappers, i.e., the glue logic that unboxes a boxed operator and converts the
# IValues from the stack into the correct arguments for the unboxed kernel, based on the corresponding JIT schema. This codegen is
# an alternative to the existing C++ metaprogramming approach for generating unboxing wrappers, but it gets the
# job done statically. These generated unboxing wrappers are useful when we need to register
# a fixed set of operators known at compile time, and can thus save some time in the runtime initialization phase.
#
# Here's an example on how the codegen works:
#
# - Function Schema (source of truth)
#
# aten::empty.names(int[] size, *, Dimname[]? names,
# ScalarType? dtype=None, Layout? layout=None,
# Device? device=None, bool? pin_memory=None,
# MemoryFormat? memory_format=None) -> Tensor
# - Argument Conversion
# Generates C++ code to convert an ivalue (from stack) to its underlying C++ type.
# - int[] size
# ```cpp
# const c10::List<c10::IValue> size_list_in = (std::move(peek(stack, 0, 7))).toList();
#
# std::vector<int64_t> size_vec;
# for (c10::IValue size_elem: size_list_in) {
# int64_t size_base = size_elem.to<int64_t>();
# size_vec.push_back(size_base);
# }
# at::ArrayRef<int64_t> size_list_out(size_vec);
# ~~~~~~~~~~~~~ <-- The converted argument from ivalues in the stack.
# Will be passed to unboxed kernel.
# ```
# - Dimname[]? names
# ```cpp
# c10::optional<c10::IValue> names_opt = (std::move(peek(stack, 1, 7))).toOptional<c10::IValue>();
# c10::optional<at::ArrayRef<at::Dimname>> names_opt_out;
# if (names_opt.has_value()) {
# ~~~~~~~~~~~ <-- Unwrapping optional shell
# const c10::IValue names_opt_in = names_opt.value();
# const c10::List<c10::IValue> names_list_in = names_opt_in.toList();
#
# std::vector<at::Dimname> names_vec;
# for (c10::IValue names_elem: names_list_in) {
# ~~~~~~~~~~~~~~~~~~~~~~~~~ <-- Unrolling list, then convert elements one by one.
# at::Dimname names_base = names_elem.to<at::Dimname>();
# names_vec.push_back(names_base);
# }
# at::ArrayRef<at::Dimname> names_list_out(names_vec);
#
# names_opt_out = c10::optional<at::ArrayRef<at::Dimname>>(names_list_out);
# } else {
# names_opt_out = c10::optional<at::ArrayRef<at::Dimname>>();
# }
# ```
# - ScalarType? dtype (similarly for the rest of the arguments)
# ```cpp
# c10::optional<c10::IValue> dtype_opt = (std::move(peek(stack, 2, 7))).toOptional<c10::IValue>();
# c10::optional<at::ScalarType> dtype_opt_out;
# if (dtype_opt.has_value()) {
# const c10::IValue dtype_opt_in = dtype_opt.value();
# at::ScalarType dtype_base = dtype_opt_in.to<at::ScalarType>();
# ~~~~~~~~~~~~~~~~~~~~ <-- For base types, convert ivalue to it
# directly using ".to<T>()" API.
# dtype_opt_out = c10::optional<at::ScalarType>(dtype_base);
# } else {
# dtype_opt_out = c10::optional<at::ScalarType>();
# }
# ```
#
# - Unboxed Kernel Call
# ```cpp
# auto result_ = torch::empty(
# size_list_out,
# names_opt_out,
# options,
# memory_format_opt_out
# );
# ```
#
# - Push Result Back to Stack
# ```cpp
# drop(stack, 7);
# pack(stack, std::move(result_));
# ```

connector = "\n\t"


# Return the unboxing function name for a NativeFunction
def name(f: NativeFunction) -> str:
return f.func.name.unambiguous_name()


# Convert all the arguments in a NativeFunction to C++ code
def convert_arguments(f: NativeFunction) -> Tuple[List[Binding], List[str]]:
# we need the 'self' argument so method needs to be False
args = (
CppSignatureGroup.from_native_function(f, method=False)
.most_faithful_signature()
.arguments()
)
code_list = [
f"c10::IValue {args[i].name} = std::move(peek(stack, {i}, {len(args)}));"
for i in range(len(args))
] + [""]
binding_list = []
for i, arg in enumerate(args):
# expecting only Argument
if not isinstance(arg.argument, Argument):
raise Exception(
f"Unexpected argument type, expecting `Argument` but got {arg}"
)
argument: Argument = arg.argument
unboxed_name, _, code, decl = argumenttype_ivalue_convert(
argument.type, argument.name, mutable=argument.is_write
)
code_list.extend(decl)
code_list.extend(code)
binding_list.append(arg.with_name(unboxed_name))
return binding_list, code_list
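
# Illustrative sketch (not part of the original file): for a two-argument op the
# prologue generated above looks roughly like
#
#   c10::IValue self = std::move(peek(stack, 0, 2));
#   c10::IValue other = std::move(peek(stack, 1, 2));
#
# followed by the per-argument conversion code emitted by
# argumenttype_ivalue_convert below.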


# Takes in the type, name and mutability corresponding to an argument, and generates a tuple of:
# (1) the name of the resulting unboxed variable
# (2) its C++ type (CType)
# (3) the C++ code that performs the conversion from the stack IValue
# (4) any declarations that must live in the enclosing scope (e.g. the std::vector backing an ArrayRef)
def argumenttype_ivalue_convert(
t: Type, arg_name: str, *, mutable: bool = False
) -> Tuple[str, CType, List[str], List[str]]:
ctype = cpp.argumenttype_type(t=t, mutable=mutable, binds=arg_name).type
if isinstance(t, BaseType):
out_name = f"{arg_name}_base"
code, decl = _gen_code_base_type(
arg_name=arg_name, out_name=out_name, ctype=ctype
)
elif isinstance(t, OptionalType):
out_name = f"{arg_name}_opt_out"
code, decl = _gen_code_optional_type(
arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
)
elif isinstance(t, ListType):
out_name = f"{arg_name}_list_out"
code, decl = _gen_code_list_type(
arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
)
else:
raise Exception(f"Cannot handle type {t}. arg_name: {arg_name}")
return out_name, ctype, code, decl
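
# Illustrative note (not part of the original file): the returned variable name
# encodes which branch was taken, e.g. for an argument named `x`:
#   int    -> x_base      (direct .to<T>() conversion)
#   int?   -> x_opt_out   (optional unwrapping)
#   int[]  -> x_list_out  (list unrolling)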


def _gen_code_base_type(
arg_name: str, out_name: str, ctype: CType
) -> Tuple[List[str], List[str]]:
return [
f"{ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.to<{ctype.cpp_type(strip_ref=True)}>();"
], []


def _gen_code_optional_type(
arg_name: str, out_name: str, t: OptionalType, ctype: CType
) -> Tuple[List[str], List[str]]:
in_name = f"{arg_name}_opt_in"
res_name, _, res_code, decl = argumenttype_ivalue_convert(t.elem, in_name)
return (
f"""
c10::optional<c10::IValue> {arg_name}_opt = {arg_name}.toOptional<c10::IValue>();
{ctype.cpp_type(strip_ref=True)} {out_name};
if ({arg_name}_opt.has_value()) {{
const c10::IValue {in_name} = {arg_name}_opt.value();
{connector.join(res_code)}
{out_name} = {ctype.cpp_type(strip_ref=True)}({res_name});
}} else {{
{out_name} = {ctype.cpp_type(strip_ref=True)}();
}}
""".split(
"\n"
),
decl,
)


def _gen_code_list_type(
arg_name: str, out_name: str, t: ListType, ctype: CType
) -> Tuple[List[str], List[str]]:
in_name = f"{arg_name}_list_in"
elem_name = f"{arg_name}_elem"
code = [f"const c10::List<c10::IValue> {in_name} = {arg_name}.toList();"]
res_name, res_ctype, res_code, decl = argumenttype_ivalue_convert(t.elem, elem_name)
# handle list type with size, e.g., bool[4]
if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool and t.size:
code.extend(
f"""
{ctype.cpp_type(strip_ref=True)} {out_name} = as_array<{res_ctype.cpp_type(strip_ref=True)}, {t.size}>({in_name});
""".split(
"\n"
)
)
    # we have to use c10::List for optional elements, e.g., Tensor?[] -> c10::List<c10::optional<at::Tensor>>
elif isinstance(t.elem, OptionalType):
code.extend(
f"""
{ctype.cpp_type(strip_ref=True)} {out_name};
for (c10::IValue {elem_name}: {in_name}) {{
{connector.join(res_code)}
{out_name}.push_back({res_name});
}}
""".split(
"\n"
)
)
else:
# use ArrayRef as default.
vec_name = arg_name + "_vec"
        # the vector declaration has to be hoisted to the enclosing scope so the ArrayRef still points at valid data
decl.append(f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};")
code.extend(
f"""
for (c10::IValue {elem_name}: {in_name}) {{
{connector.join(res_code)}
{vec_name}.push_back({res_name});
}}
{ctype.cpp_type(strip_ref=True)} {out_name}({vec_name});
""".split(
"\n"
)
)
return code, decl
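
# Illustrative note (not part of the original file): which branch a few list
# types would take, per the checks above:
#   bool[3]   -> fixed-size path via as_array<bool, 3>(...)
#   Tensor?[] -> c10::List path, converting and push_back-ing each element
#   int[]     -> default path, collecting elements into a std::vector declared
#                in the enclosing scope and wrapping it in the ArrayRef-style
#                type produced by cpp.argumenttype_type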
| pytorch-master | torchgen/api/unboxing.py |