python_code | repo_name | file_path |
---|---|---|
| pytorch-master | benchmarks/instruction_counts/execution/__init__.py |
"""Handle the details of subprocess calls and retries for a given benchmark run."""
import dataclasses
import json
import os
import pickle
import signal
import subprocess
import time
from typing import List, Optional, Union, TYPE_CHECKING
import uuid
from core.api import AutoLabels
from core.types import Label
from core.utils import get_temp_dir
from worker.main import WORKER_PATH, WorkerFailure, WorkerOutput, WorkerTimerArgs, WorkerUnpickler
if TYPE_CHECKING:
PopenType = subprocess.Popen[bytes]
else:
PopenType = subprocess.Popen
# Mitigate https://github.com/pytorch/pytorch/issues/37377
_ENV = "MKL_THREADING_LAYER=GNU"
_PYTHON = "python"
PYTHON_CMD = f"{_ENV} {_PYTHON}"
# We must specify `bash` so that `source activate ...` always works
SHELL = "/bin/bash"
@dataclasses.dataclass(frozen=True)
class WorkOrder:
"""Spec to schedule work with the benchmark runner."""
label: Label
autolabels: AutoLabels
timer_args: WorkerTimerArgs
source_cmd: Optional[str] = None
timeout: Optional[float] = None
retries: int = 0
def __hash__(self) -> int:
return id(self)
def __str__(self) -> str:
return json.dumps({
"label": self.label,
"autolabels": self.autolabels.as_dict,
"num_threads": self.timer_args.num_threads,
})
class _BenchmarkProcess:
"""Wraps subprocess.Popen for a given WorkOrder."""
_work_order: WorkOrder
_cpu_list: Optional[str]
_proc: PopenType
# Internal bookkeeping
_communication_file: str
_start_time: float
_end_time: Optional[float] = None
_retcode: Optional[int]
_result: Optional[Union[WorkerOutput, WorkerFailure]] = None
def __init__(self, work_order: WorkOrder, cpu_list: Optional[str]) -> None:
self._work_order = work_order
self._cpu_list = cpu_list
self._start_time = time.time()
self._communication_file = os.path.join(get_temp_dir(), f"{uuid.uuid4()}.pkl")
with open(self._communication_file, "wb") as f:
pickle.dump(self._work_order.timer_args, f)
self._proc = subprocess.Popen(
self.cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
executable=SHELL,
)
def clone(self) -> "_BenchmarkProcess":
return _BenchmarkProcess(self._work_order, self._cpu_list)
@property
def cmd(self) -> str:
cmd: List[str] = []
if self._work_order.source_cmd is not None:
cmd.extend([self._work_order.source_cmd, "&&"])
cmd.append(_ENV)
if self._cpu_list is not None:
cmd.extend([
f"GOMP_CPU_AFFINITY={self._cpu_list}",
"taskset",
"--cpu-list",
self._cpu_list
])
cmd.extend([
_PYTHON, WORKER_PATH,
"--communication_file", self._communication_file,
])
return " ".join(cmd)
@property
def duration(self) -> float:
return (self._end_time or time.time()) - self._start_time
@property
def result(self) -> Union[WorkerOutput, WorkerFailure]:
self._maybe_collect()
assert self._result is not None
return self._result
def poll(self) -> Optional[int]:
self._maybe_collect()
return self._retcode
def interrupt(self) -> None:
"""Soft interrupt. Allows subprocess to cleanup."""
self._proc.send_signal(signal.SIGINT)
def terminate(self) -> None:
"""Hard interrupt. Immediately SIGTERM subprocess."""
self._proc.terminate()
def _maybe_collect(self) -> None:
if self._result is not None:
# We've already collected the results.
return
self._retcode = self._proc.poll()
if self._retcode is None:
# `_proc` is still running
return
with open(self._communication_file, "rb") as f:
result = WorkerUnpickler(f).load_output()
if isinstance(result, WorkerOutput) and self._retcode:
# Worker managed to complete the designated task, but worker
# process did not finish cleanly.
result = WorkerFailure("Worker failed silently.")
if isinstance(result, WorkerTimerArgs):
# Worker failed, but did not write a result so we're left with the
# original TimerArgs. Grabbing all of stdout and stderr isn't
# ideal, but we don't have a better way to determine what to keep.
proc_stdout = self._proc.stdout
assert proc_stdout is not None
result = WorkerFailure(
failure_trace=proc_stdout.read().decode("utf-8"))
self._result = result
self._end_time = time.time()
# Release communication file.
os.remove(self._communication_file)
class InProgress:
"""Used by the benchmark runner to track outstanding jobs.
This class handles bookkeeping and timeout + retry logic.
"""
_proc: _BenchmarkProcess
_timeouts: int = 0
def __init__(self, work_order: WorkOrder, cpu_list: Optional[str]):
self._work_order = work_order
self._proc = _BenchmarkProcess(work_order, cpu_list)
@property
def work_order(self) -> WorkOrder:
return self._proc._work_order
@property
def cpu_list(self) -> Optional[str]:
return self._proc._cpu_list
@property
def proc(self) -> _BenchmarkProcess:
# NB: For cleanup only.
return self._proc
@property
def duration(self) -> float:
return self._proc.duration
def check_finished(self) -> bool:
if self._proc.poll() is not None:
return True
timeout = self.work_order.timeout
if timeout is None or self._proc.duration < timeout:
return False
self._timeouts += 1
max_attempts = (self._work_order.retries or 0) + 1
if self._timeouts < max_attempts:
print(
f"\nTimeout: {self._work_order.label}, {self._work_order.autolabels} "
f"(Attempt {self._timeouts} / {max_attempts})")
self._proc.interrupt()
self._proc = self._proc.clone()
return False
raise subprocess.TimeoutExpired(cmd=self._proc.cmd, timeout=timeout)
@property
def result(self) -> Union[WorkerOutput, WorkerFailure]:
return self._proc.result
def __hash__(self) -> int:
return id(self)
| pytorch-master | benchmarks/instruction_counts/execution/work.py |
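The classes in `execution/work.py` above are driven by a runner that is not reproduced here (`execution/runner.py`). The sketch below shows one plausible way to drive them, assuming the `benchmarks/instruction_counts` directory is importable; the serial polling loop is an editorial illustration, not the repository's actual runner.

import time
from typing import Dict, Iterable, Optional, Union

from execution.work import InProgress, WorkOrder
from worker.main import WorkerFailure, WorkerOutput


def run_serially(
    work_orders: Iterable[WorkOrder],
    cpu_list: Optional[str] = None,
) -> Dict[WorkOrder, Union[WorkerOutput, WorkerFailure]]:
    # Editorial stand-in for execution/runner.py, which is not shown above.
    results = {}
    for work_order in work_orders:
        job = InProgress(work_order, cpu_list)
        # check_finished() also applies the timeout / retry logic and raises
        # subprocess.TimeoutExpired once the retry budget is exhausted.
        while not job.check_finished():
            time.sleep(1.0)
        results[work_order] = job.result  # WorkerOutput on success, WorkerFailure otherwise
    return results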
"""Collect instruction counts for continuous integration."""
import argparse
import hashlib
import json
import time
from typing import Dict, List, Union
from core.expand import materialize
from definitions.standard import BENCHMARKS
from execution.runner import Runner
from execution.work import WorkOrder
REPEATS = 5
TIMEOUT = 600 # Seconds
RETRIES = 2
VERSION = 0
MD5 = "4d55e8abf881ad38bb617a96714c1296"
def main(argv: List[str]) -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--destination", type=str, default=None)
parser.add_argument("--subset", action="store_true")
args = parser.parse_args(argv)
t0 = int(time.time())
version = VERSION
benchmarks = materialize(BENCHMARKS)
# Useful for local development, since e2e time for the full suite is O(1 hour)
in_debug_mode = (args.subset or args.destination is None)
if args.subset:
version = -1
benchmarks = benchmarks[:10]
work_orders = tuple(
WorkOrder(label, autolabels, timer_args, timeout=TIMEOUT, retries=RETRIES)
for label, autolabels, timer_args in benchmarks * REPEATS
)
keys = tuple({str(work_order): None for work_order in work_orders}.keys())
md5 = hashlib.md5()
for key in keys:
md5.update(key.encode("utf-8"))
# Warn early, since collection takes a long time.
if md5.hexdigest() != MD5 and not args.subset:
version = -1
print(f"WARNING: Expected {MD5}, got {md5.hexdigest()} instead")
results = Runner(work_orders, cadence=30.0).run()
# TODO: Annotate with TypedDict when 3.8 is the minimum supported version.
grouped_results: Dict[str, Dict[str, List[Union[float, int]]]] = {
key: {"times": [], "counts": []} for key in keys}
for work_order, r in results.items():
key = str(work_order)
grouped_results[key]["times"].extend(r.wall_times)
grouped_results[key]["counts"].extend(r.instructions)
final_results = {
"version": version,
"md5": md5.hexdigest(),
"start_time": t0,
"end_time": int(time.time()),
"values": grouped_results,
}
if args.destination:
with open(args.destination, "wt") as f:
json.dump(final_results, f)
if in_debug_mode:
result_str = json.dumps(final_results)
print(f"{result_str[:30]} ... {result_str[-30:]}\n")
import pdb
pdb.set_trace()
| pytorch-master | benchmarks/instruction_counts/applications/ci.py |
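One detail of `applications/ci.py` above worth isolating is how it fingerprints the benchmark definitions: work-order keys are de-duplicated with an insertion-ordered dict and folded into an MD5 digest, so definition drift is flagged (and the results version invalidated) before an hour-long collection run. The sketch below reproduces just that trick with placeholder keys.

import hashlib

# Editorial sketch, not part of the repository; the keys are placeholders.
raw_keys = ["Empty: no allocation", "Pointwise: Math", "Empty: no allocation"]
keys = tuple({k: None for k in raw_keys}.keys())  # ordered de-duplication

md5 = hashlib.md5()
for key in keys:
    md5.update(key.encode("utf-8"))

print(md5.hexdigest())  # compared against the pinned MD5 constant in ci.py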
| pytorch-master | benchmarks/instruction_counts/applications/__init__.py |
| pytorch-master | benchmarks/instruction_counts/worker/__init__.py |
"""File invoked through subprocess to actually carry out measurements.
`worker/main.py` is deliberately isolated from the rest of the benchmark
infrastructure. Other parts of the benchmark rely on this file, but
`worker/` has only one Python file and does not import ANYTHING from the rest
of the benchmark suite. This matters because a `source` command might change
the CWD, so we can't rely on relative paths to reach the other files (namely
`core.api`). Isolation also keeps startup time down by limiting
spurious definition work.
The life of a worker is very simple:
It receives a file containing a `WorkerTimerArgs` telling it what to run,
and writes a `WorkerOutput` result back to the same file.
Because this file only expects to run in a child context, error handling means
plumbing failures up to the caller, not raising in this process.
"""
import argparse
import dataclasses
import io
import os
import pickle
import timeit
import traceback
from typing import Any, Tuple, Union, TYPE_CHECKING
import sys
if TYPE_CHECKING:
# Benchmark utils are only partially strict compliant, so MyPy won't follow
# imports using the public namespace. (Due to an exclusion rule in
# mypy-strict.ini)
from torch.utils.benchmark.utils.timer import Language, Timer
from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import CallgrindStats
else:
from torch.utils.benchmark import CallgrindStats, Language, Timer
WORKER_PATH = os.path.abspath(__file__)
# =============================================================================
# == Interface ================================================================
# =============================================================================
# While the point of this is mainly to collect instruction counts, we're going
# to have to compile C++ timers anyway (as they're used as a check before
# calling Valgrind), so we may as well grab wall times for reference. They
# are comparatively inexpensive.
MIN_RUN_TIME = 5
# Repeats are inexpensive as long as they are all run in the same process. This
# also lets us filter outliers (e.g. malloc arena reorganization), so we don't
# need a high CALLGRIND_NUMBER to get good data.
CALLGRIND_NUMBER = 100
CALLGRIND_REPEATS = 5
@dataclasses.dataclass(frozen=True)
class WorkerTimerArgs:
"""Container for Timer constructor arguments.
This dataclass serves two roles. First, it is a simple interface for
defining benchmarks. (See core.api.GroupedStmts and core.api.GroupedModules
for the advanced interfaces.) Second, it provides serialization for
controlling workers. `Timer` is not pickleable, so instead the main process
will pass `WorkerTimerArgs` instances to workers for processing.
"""
stmt: str
setup: str = "pass"
global_setup: str = ""
num_threads: int = 1
language: Language = Language.PYTHON
@dataclasses.dataclass(frozen=True)
class WorkerOutput:
# Only return values to reduce communication between main process and workers.
wall_times: Tuple[float, ...]
instructions: Tuple[int, ...]
@dataclasses.dataclass(frozen=True)
class WorkerFailure:
# If a worker fails, we attach the string contents of the Exception
# rather than the Exception object itself. This is done for two reasons:
# 1) Depending on the type thrown, `e` may or may not be pickleable
# 2) If we re-throw in the main process, we lose the true stack trace.
failure_trace: str
class WorkerUnpickler(pickle.Unpickler):
def find_class(self, module: str, name: str) -> Any:
"""Resolve import for pickle.
When the main runner uses a symbol `foo` from this file, it sees it as
`worker.main.foo`. However the worker (called as a standalone file)
sees the same symbol as `__main__.foo`. We have to help pickle
understand that they refer to the same symbols.
"""
symbol_map = {
# Only blessed interface Enums and dataclasses need to be mapped.
"WorkerTimerArgs": WorkerTimerArgs,
"WorkerOutput": WorkerOutput,
"WorkerFailure": WorkerFailure,
}
if name in symbol_map:
return symbol_map[name]
return super().find_class(module, name)
def load_input(self) -> WorkerTimerArgs:
result = self.load()
assert isinstance(result, WorkerTimerArgs)
return result
def load_output(self) -> Union[WorkerTimerArgs, WorkerOutput, WorkerFailure]:
"""Convenience method for type safe loading."""
result = self.load()
assert isinstance(result, (WorkerTimerArgs, WorkerOutput, WorkerFailure))
return result
# =============================================================================
# == Execution ================================================================
# =============================================================================
def _run(timer_args: WorkerTimerArgs) -> WorkerOutput:
timer = Timer(
stmt=timer_args.stmt,
setup=timer_args.setup or "pass",
global_setup=timer_args.global_setup,
# Prevent NotImplementedError on GPU builds and C++ snippets.
timer=timeit.default_timer,
num_threads=timer_args.num_threads,
language=timer_args.language,
)
m = timer.blocked_autorange(min_run_time=MIN_RUN_TIME)
stats: Tuple[CallgrindStats, ...] = timer.collect_callgrind(
number=CALLGRIND_NUMBER,
collect_baseline=False,
repeats=CALLGRIND_REPEATS,
retain_out_file=False,
)
return WorkerOutput(
wall_times=tuple(m.times),
instructions=tuple(s.counts(denoise=True) for s in stats)
)
def main(communication_file: str) -> None:
result: Union[WorkerOutput, WorkerFailure]
try:
with open(communication_file, "rb") as f:
timer_args: WorkerTimerArgs = WorkerUnpickler(f).load_input()
assert isinstance(timer_args, WorkerTimerArgs)
result = _run(timer_args)
except KeyboardInterrupt:
# Runner process sent SIGINT.
sys.exit()
except BaseException:
trace_f = io.StringIO()
traceback.print_exc(file=trace_f)
result = WorkerFailure(failure_trace=trace_f.getvalue())
if not os.path.exists(os.path.split(communication_file)[0]):
# This worker is an orphan, and the parent has already cleaned up the
# working directory. In that case we can simply exit.
print(f"Orphaned worker {os.getpid()} exiting.")
return
with open(communication_file, "wb") as f:
pickle.dump(result, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--communication_file', type=str)
communication_file = parser.parse_args().communication_file
main(communication_file)
| pytorch-master | benchmarks/instruction_counts/worker/main.py |
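The module docstring of `worker/main.py` describes a file-based protocol: the parent pickles a `WorkerTimerArgs` into a file, invokes the worker with `--communication_file`, and reads back a `WorkerOutput` or `WorkerFailure`. Below is a hedged sketch of that round trip from the parent side. The temp-file handling and direct `sys.executable` invocation are illustrative assumptions (the real orchestration, including env vars, CPU pinning, and retries, lives in `execution/work.py`), and a real run also needs Valgrind for the Callgrind collection.

import pickle
import subprocess
import sys
import tempfile

from worker.main import WORKER_PATH, WorkerTimerArgs, WorkerUnpickler

# Editorial sketch, not part of the repository.
with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as f:
    communication_file = f.name
    pickle.dump(WorkerTimerArgs(stmt="y = x + 1", setup="import torch\nx = torch.ones(())"), f)

subprocess.run(
    [sys.executable, WORKER_PATH, "--communication_file", communication_file],
    check=True,
)

with open(communication_file, "rb") as f:
    result = WorkerUnpickler(f).load_output()  # WorkerOutput on success, WorkerFailure on error
print(result)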
"""Default set of benchmarks.
Parser notes:
`parse_stmts`:
- Width for the left (Python) column MUST be 40 characters.
- The column separator is " | ", not "|". Whitespace matters.
`GroupedVariants`:
- `Setup` and `Global_Setup` (case insensitive) are reserved keywords
to populate `setup` and `global_setup` for every generated benchmark.
- To set a label for the succeeding block, add `# @YOUR_LABEL` (Python)
or `// @YOUR_LABEL` (C++).
"""
from core.api import GroupedModules, GroupedStmts, GroupedVariants
from core.types import FlatIntermediateDefinition
from core.utils import flatten, parse_stmts
from definitions.setup import Setup
BENCHMARKS: FlatIntermediateDefinition = flatten({
"Empty": {
"no allocation": GroupedStmts(
r"torch.empty(())",
r"torch::empty({0});",
),
"with allocation": GroupedStmts(
r"torch.empty((1,))",
r"torch::empty({1});",
),
"overloads": GroupedVariants(
cpp_block=r"""
// @Setup
auto options_empty = c10::TensorOptions();
auto options_full = c10::TensorOptions().dtype(at::kFloat).device(at::kCPU);
auto optional_float = c10::make_optional(at::kFloat);
// @TensorOptions overload
at::empty({0}, options_empty);
at::empty({0}, options_full);
at::empty({0}, at::kFloat); // implicit conversion
// @Faithful overload
at::empty({0}, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
at::empty({0}, at::kFloat, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
at::empty({0}, optional_float, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
"""
),
},
"Pointwise": {
"Math": GroupedVariants(*parse_stmts(r"""
Python | C++
---------------------------------------- | ----------------------------------------
# @setup | // @setup
torch.manual_seed(138_10_23) | torch::manual_seed(1381023);
x = torch.rand((4, 4)) | auto x = torch::rand({4, 4});
y_float = torch.ones((4, 4)) | auto y_float = torch::ones({4, 4});
y_vector = torch.ones((4, 1)) | auto y_vector = torch::ones({4, 1});
y_int = torch.ones( | auto y_int = torch::ones({4, 4}, at::kInt);
(4, 4), dtype=torch.int32) |
|
# @add | // @add
x += 1.0 | x += 1;
x += y_float | x += y_float;
x += y_vector | x += y_vector;
x += y_int | x += y_int;
x + y_float | x + y_float;
torch.add(x, y_float) | torch::add(x, y_float);
torch.add(x, y_float, out=x) | torch::add_out(/*out=*/x, x, y_float);
|
# @multiply | // @multiply
x *= 1.0 | x *= 1;
x *= y_float | x *= y_float;
x *= y_vector | x *= y_vector;
x *= y_int | x *= y_int;
x * y_float | x * y_float;
torch.mul(x, y_float) | torch::mul(x, y_float);
torch.mul(x, y_float, out=x) | torch::mul_out(/*out=*/x, x, y_float);
|
# @equality | // @equality
x == y_float | x == y_float;
x == 1.0 | x == 1.0;
""")),
"Data movement": GroupedVariants(*parse_stmts(r"""
Python | C++
---------------------------------------- | ----------------------------------------
# @setup | // @setup
x = torch.ones((4, 4)) | auto x = torch::ones({4, 4});
y = torch.ones((4, 4)) | auto y = torch::ones({4, 4});
x_t = x.t() | auto x_t = x.t();
|
# @contiguous (trivial) | // @contiguous (trivial)
x.contiguous() | x.contiguous();
|
# @contiguous (non-trivial) | // @contiguous (non-trivial)
x_t.contiguous() | x_t.contiguous();
|
# @clone | // @clone
x.clone() | x.clone();
|
# @copy_ | // @copy_
x.copy_(y) | x.copy_(y);
|
# @zero_ | // @zero_
x.zero_() | x.zero_();
|
# @RNG | // @RNG
x.uniform_() | x.uniform_();
""")),
},
"Reduction": GroupedVariants(*parse_stmts(r"""
Python | C++
---------------------------------------- | ----------------------------------------
# @setup | // @setup
x = torch.ones((4, 4)) | auto x = torch::ones({4, 4});
|
# @max | // @max
x.max() | x.max();
|
# @sum | // @sum
x.sum() | x.sum();
|
# @variance | // @variance
x.var(0) | x.var(0);
""")),
"Indexing": GroupedVariants(*parse_stmts(r"""
Python | C++
---------------------------------------- | ----------------------------------------
# @setup | // @setup
| using namespace torch::indexing;
torch.manual_seed(6626_10_34) | torch::manual_seed(66261034);
|
x = torch.randn(1, 1, 1) | auto x = torch::randn({1, 1, 1});
y = torch.randn(1, 1, 1) | auto y = torch::randn({1, 1, 1});
|
# @Tensor-Scalar | // @Tensor-Scalar
x[0] = 1 | x.index_put_({0}, 1);
x[0, 0] = 1 | x.index_put_({0, 0}, 1);
x[0, 0, 0] = 1 | x.index_put_({0, 0, 0}, 1);
|
# @Tensor-Scalar (Advanced) | // @Tensor-Scalar (Advanced)
x[...] = 1 | x.index_put_({"..."}, 1);
x[:] = 1 | x.index_put_({Slice(None, None, None)}, 1);
x[None] = 1 | x.index_put_({None}, 1);
x[False] = 1 | x.index_put_({false}, 1);
x[True] = 1 | x.index_put_({true}, 1);
|
# @Tensor-Tensor | // @Tensor-Tensor
x[0] = y[0] | x.index_put_({0}, y.index({0}));
x[0, 0] = y[0, 0] | x.index_put_({0, 0}, y.index({0, 0}));
x[0, 0, 0] = y[0, 0, 0] | x.index_put_({0, 0, 0}, y.index({0, 0, 0}));
|
# @Tensor-Tensor (Advanced) | // @Tensor-Tensor (Advanced)
x[...] = y[...] | x.index_put_({"..."}, y.index({"..."}));
x[:] = y[:] | x.index_put_({Slice(None, None, None)}, y.index({Slice(None, None, None)}));
x[None] = y[None] | x.index_put_({None}, y.index({None}));
x[False] = y[False] | x.index_put_({false}, y.index({false}));
x[True] = y[True] | x.index_put_({true}, y.index({true}));
""")),
"Metadata and views": GroupedVariants(*parse_stmts(r"""
Python | C++
---------------------------------------- | ----------------------------------------
# @setup | // @setup
x = torch.ones((4, 4)) | auto x = torch::ones({4, 4});
|
# @size | // @size
x.size()[0] | x.sizes()[0];
|
# @stride | // @stride
x.stride(0) | x.stride(0);
|
# @as_strided | // @as_strided
torch.as_strided(x, (2, 3), (4, 1), 2) | torch::as_strided(x, {2, 3}, {4, 1}, 2);
|
# @select | // @select
x.select(1, 1) | x.select(1, 1);
|
# @unsqueeze | // @unsqueeze
x.unsqueeze(0) | x.unsqueeze(0);
|
# @view | // @view
x.view(-1, 1) | x.view({-1, 1});
|
# @transpose | // @transpose
x.t() | x.t();
|
# @reshape | // @reshape
x.reshape((16, 1)) | x.reshape({16, 1});
""")),
"nn Modules": {
py_constructor.split("(")[0]: GroupedModules(
f"model = torch.nn.{py_constructor}",
f"auto model = torch::nn::{cpp_constructor};",
setup=setup.value,
signature="f(x) -> y",
torchscript=torchscript,
)
for setup, torchscript, (py_constructor, cpp_constructor) in (
(Setup.TRIVIAL_4D, True, ("BatchNorm2d(4)",) * 2),
(Setup.TRIVIAL_4D, True, ("GroupNorm(2, 4)",) * 2),
(Setup.TRIVIAL_4D, True, (
"LayerNorm(4)",
"LayerNorm(torch::nn::LayerNormOptions({4}))"
)),
(Setup.TRIVIAL_3D, True, ("Conv1d(4, 4, 1)",) * 2),
(Setup.TRIVIAL_4D, True, ("Conv2d(4, 4, 1)",) * 2),
(Setup.TRIVIAL_4D, True, ("MaxPool2d(2)",) * 2),
(Setup.TRIVIAL_2D, True, ("ReLU()",) * 2),
(Setup.TRIVIAL_2D, True, ("Sigmoid()",) * 2),
(Setup.TRIVIAL_4D, True, ("Linear(4, 2)",) * 2),
# TODO: LSTM can't be TorchScript'd
(Setup.TRIVIAL_3D, False, ("LSTM(4, 2)",) * 2),
)
},
"training": {
"simple": GroupedStmts(
*parse_stmts(r"""
Python | C++
---------------------------------------- | ----------------------------------------
a0 = torch.nn.functional.relu(x * w0) | auto a0 = torch::nn::functional::relu(x * w0);
y = a0 * w1 | auto y = a0 * w1;
"""),
Setup.TRAINING.value,
num_threads=(1, 2),
signature=r"f(x, w0, w1) -> y",
torchscript=True,
autograd=True,
),
"ensemble": GroupedStmts(
*parse_stmts(r"""
Python | C++
---------------------------------------- | ----------------------------------------
a0 = torch.nn.functional.gelu(x * w0) | auto a0 = torch::nn::functional::gelu(x * w0);
a1 = torch.nn.functional.prelu(y, w1) | auto a1 = torch::nn::functional::prelu(y, w1);
z = torch.nn.functional.normalize( | auto z = torch::nn::functional::normalize(
torch.cat([a0, a1]), | torch::cat({a0, a1}),
p=2.0, dim=0, | torch::nn::functional::NormalizeFuncOptions().p(2).dim(0)
).dot(w2) | ).dot(w2);
"""),
Setup.TRAINING.value,
num_threads=(1, 2),
signature=r"f(x, y, w0, w1, w2) -> z",
torchscript=True,
autograd=True,
),
},
"InferenceMode": GroupedVariants(
# In general, the mixed input scenario is less common so its
# perf can be less important than pure inference tensor inputs.
cpp_block=r"""
// @Setup
auto s = torch::ones({3, 3}); // Normal Tensor
c10::InferenceMode guard;
auto x = torch::ones({3, 3}); // Inference Tensor
// @View
torch::Tensor y = x.view({9});
// @Inplace
torch::Tensor y = x.mul_(x);
// @Mixed
torch::Tensor y = x + s;
"""
),
})
| pytorch-master | benchmarks/instruction_counts/definitions/standard.py |
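The docstring of `definitions/standard.py` pins down the `parse_stmts` layout: a 40-character Python column and a " | " separator. The sketch below builds a conforming block programmatically with `ljust(40)` to avoid miscounting the width by hand; it assumes `benchmarks/instruction_counts` is importable and that `parse_stmts` returns a `(python, cpp)` pair, as its use with `GroupedStmts(*parse_stmts(...))` above implies.

from core.utils import parse_stmts

# Editorial sketch, not part of the repository.
rows = [
    ("Python", "C++"),
    ("-" * 40, "-" * 40),
    ("x = torch.ones((4, 4))", "auto x = torch::ones({4, 4});"),
    ("x += 1.0", "x += 1;"),
]
block = "\n".join(f"{py.ljust(40)} | {cpp}" for py, cpp in rows)

py_stmt, cpp_stmt = parse_stmts(block)
print(py_stmt)
print(cpp_stmt)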
| pytorch-master | benchmarks/instruction_counts/definitions/__init__.py |
"""Define some common setup blocks which benchmarks can reuse."""
import enum
from core.api import GroupedSetup
from core.utils import parse_stmts
_TRIVIAL_2D = GroupedSetup(
r"x = torch.ones((4, 4))",
r"auto x = torch::ones({4, 4});"
)
_TRIVIAL_3D = GroupedSetup(
r"x = torch.ones((4, 4, 4))",
r"auto x = torch::ones({4, 4, 4});"
)
_TRIVIAL_4D = GroupedSetup(
r"x = torch.ones((4, 4, 4, 4))",
r"auto x = torch::ones({4, 4, 4, 4});"
)
_TRAINING = GroupedSetup(*parse_stmts(
r"""
Python | C++
---------------------------------------- | ----------------------------------------
# Inputs | // Inputs
x = torch.ones((1,)) | auto x = torch::ones({1});
y = torch.ones((1,)) | auto y = torch::ones({1});
|
# Weights | // Weights
w0 = torch.ones( | auto w0 = torch::ones({1});
(1,), requires_grad=True) | w0.set_requires_grad(true);
w1 = torch.ones( | auto w1 = torch::ones({1});
(1,), requires_grad=True) | w1.set_requires_grad(true);
w2 = torch.ones( | auto w2 = torch::ones({2});
(2,), requires_grad=True) | w2.set_requires_grad(true);
"""
))
class Setup(enum.Enum):
TRIVIAL_2D = _TRIVIAL_2D
TRIVIAL_3D = _TRIVIAL_3D
TRIVIAL_4D = _TRIVIAL_4D
TRAINING = _TRAINING
| pytorch-master | benchmarks/instruction_counts/definitions/setup.py |
# Taken from https://github.com/pytorch/audio/blob/master/torchaudio/models/wav2letter.py
# So that we don't need torchaudio to be installed
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
import math
from collections import OrderedDict
from typing import Tuple, Optional
__all__ = ["Wav2Letter"]
class Wav2Letter(nn.Module):
r"""Wav2Letter model architecture from the `"Wav2Letter: an End-to-End ConvNet-based Speech Recognition System"
<https://arxiv.org/abs/1609.03193>`_ paper.
:math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}`
Args:
num_classes (int, optional): Number of classes to be classified. (Default: ``40``)
input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum``
or ``mfcc`` (Default: ``waveform``).
num_features (int, optional): Number of input features that the network will receive (Default: ``1``).
"""
def __init__(self, num_classes: int = 40,
input_type: str = "waveform",
num_features: int = 1) -> None:
super(Wav2Letter, self).__init__()
acoustic_num_features = 250 if input_type == "waveform" else num_features
acoustic_model = nn.Sequential(
nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True)
)
if input_type == "waveform":
waveform_model = nn.Sequential(
nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45),
nn.ReLU(inplace=True)
)
self.acoustic_model = nn.Sequential(waveform_model, acoustic_model)
if input_type in ["power_spectrum", "mfcc"]:
self.acoustic_model = acoustic_model
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (Tensor): Tensor of dimension (batch_size, num_features, input_length).
Returns:
Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
"""
x = self.acoustic_model(x)
x = nn.functional.log_softmax(x, dim=1)
return x
# Taken from https://github.com/SeanNaren/deepspeech.pytorch with modifications
class SequenceWise(nn.Module):
def __init__(self, module):
"""
Collapses input of dim T*N*H to (T*N)*H, then applies the given module.
Allows handling of variable sequence lengths and minibatch sizes.
:param module: Module to apply input to.
"""
super(SequenceWise, self).__init__()
self.module = module
def forward(self, x):
t, n = x.size(0), x.size(1)
x = x.view(t * n, -1)
x = self.module(x)
x = x.view(t, n, -1)
return x
def __repr__(self):
tmpstr = self.__class__.__name__ + ' (\n'
tmpstr += self.module.__repr__()
tmpstr += ')'
return tmpstr
class MaskConv(nn.Module):
def __init__(self, seq_module):
"""
Adds padding to the output of the module based on the given lengths. This is to ensure that the
results of the model do not change when batch sizes change during inference.
Input needs to be in the shape of (BxCxDxT)
:param seq_module: The sequential module containing the conv stack.
"""
super(MaskConv, self).__init__()
self.seq_module = seq_module
def forward(self, x, lengths):
"""
:param x: The input of size BxCxDxT
:param lengths: The actual length of each sequence in the batch
:return: Masked output from the module
"""
for module in self.seq_module:
x = module(x)
mask = torch.BoolTensor(x.size()).fill_(0)
if x.is_cuda:
mask = mask.cuda()
for i, length in enumerate(lengths):
length = length.item()
if (mask[i].size(2) - length) > 0:
mask[i].narrow(2, length, mask[i].size(2) - length).fill_(1)
x = x.masked_fill(mask, 0)
return x, lengths
class InferenceBatchSoftmax(nn.Module):
def forward(self, input_):
if not self.training:
return F.softmax(input_, dim=-1)
else:
return input_
class BatchRNN(nn.Module):
def __init__(self, input_size, hidden_size, rnn_type=nn.LSTM, bidirectional=False, batch_norm=True):
super(BatchRNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bidirectional = bidirectional
self.batch_norm = SequenceWise(nn.BatchNorm1d(input_size)) if batch_norm else None
self.rnn = rnn_type(input_size=input_size, hidden_size=hidden_size,
bidirectional=bidirectional, bias=True)
self.num_directions = 2 if bidirectional else 1
def flatten_parameters(self):
self.rnn.flatten_parameters()
def forward(self, x, output_lengths):
if self.batch_norm is not None:
x = self.batch_norm(x)
x = nn.utils.rnn.pack_padded_sequence(x, output_lengths, enforce_sorted=False)
x, h = self.rnn(x)
x, _ = nn.utils.rnn.pad_packed_sequence(x)
if self.bidirectional:
x = x.view(x.size(0), x.size(1), 2, -1).sum(2).view(x.size(0), x.size(1), -1) # (TxNxH*2) -> (TxNxH) by sum
return x
class Lookahead(nn.Module):
# Wang et al 2016 - Lookahead Convolution Layer for Unidirectional Recurrent Neural Networks
# input shape - sequence, batch, feature - TxNxH
# output shape - same as input
def __init__(self, n_features, context):
super(Lookahead, self).__init__()
assert context > 0
self.context = context
self.n_features = n_features
self.pad = (0, self.context - 1)
self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size=self.context, stride=1,
groups=self.n_features, padding=0, bias=None)
def forward(self, x):
x = x.transpose(0, 1).transpose(1, 2)
x = F.pad(x, pad=self.pad, value=0)
x = self.conv(x)
x = x.transpose(1, 2).transpose(0, 1).contiguous()
return x
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'n_features=' + str(self.n_features) \
+ ', context=' + str(self.context) + ')'
class DeepSpeech(nn.Module):
def __init__(self, rnn_type, labels, rnn_hidden_size, nb_layers, audio_conf,
bidirectional, context=20):
super(DeepSpeech, self).__init__()
self.hidden_size = rnn_hidden_size
self.hidden_layers = nb_layers
self.rnn_type = rnn_type
self.audio_conf = audio_conf
self.labels = labels
self.bidirectional = bidirectional
sample_rate = self.audio_conf["sample_rate"]
window_size = self.audio_conf["window_size"]
num_classes = len(self.labels)
self.conv = MaskConv(nn.Sequential(
nn.Conv2d(1, 32, kernel_size=(41, 11), stride=(2, 2), padding=(20, 5)),
nn.BatchNorm2d(32),
nn.Hardtanh(0, 20, inplace=True),
nn.Conv2d(32, 32, kernel_size=(21, 11), stride=(2, 1), padding=(10, 5)),
nn.BatchNorm2d(32),
nn.Hardtanh(0, 20, inplace=True)
))
# Based on the above convolutions and spectrogram size, using the conv output formula (W - F + 2P) / S + 1
rnn_input_size = int(math.floor((sample_rate * window_size) / 2) + 1)
rnn_input_size = int(math.floor(rnn_input_size + 2 * 20 - 41) / 2 + 1)
rnn_input_size = int(math.floor(rnn_input_size + 2 * 10 - 21) / 2 + 1)
rnn_input_size *= 32
rnns = []
rnn = BatchRNN(input_size=rnn_input_size, hidden_size=rnn_hidden_size, rnn_type=rnn_type,
bidirectional=bidirectional, batch_norm=False)
rnns.append(('0', rnn))
for x in range(nb_layers - 1):
rnn = BatchRNN(input_size=rnn_hidden_size, hidden_size=rnn_hidden_size, rnn_type=rnn_type,
bidirectional=bidirectional)
rnns.append(('%d' % (x + 1), rnn))
self.rnns = nn.Sequential(OrderedDict(rnns))
self.lookahead = nn.Sequential(
# consider adding batch norm?
Lookahead(rnn_hidden_size, context=context),
nn.Hardtanh(0, 20, inplace=True)
) if not bidirectional else None
fully_connected = nn.Sequential(
nn.BatchNorm1d(rnn_hidden_size),
nn.Linear(rnn_hidden_size, num_classes, bias=False)
)
self.fc = nn.Sequential(
SequenceWise(fully_connected),
)
self.inference_softmax = InferenceBatchSoftmax()
def forward(self, x, lengths):
lengths = lengths.cpu().int()
output_lengths = self.get_seq_lens(lengths)
x, _ = self.conv(x, output_lengths)
sizes = x.size()
x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3]) # Collapse feature dimension
x = x.transpose(1, 2).transpose(0, 1).contiguous() # TxNxH
for rnn in self.rnns:
x = rnn(x, output_lengths)
if not self.bidirectional: # no need for lookahead layer in bidirectional
x = self.lookahead(x)
x = self.fc(x)
x = x.transpose(0, 1)
# identity in training mode, softmax in eval mode
x = self.inference_softmax(x)
return x, output_lengths
def get_seq_lens(self, input_length):
"""
Given a 1D Tensor or Variable containing integer sequence lengths, return a 1D tensor or variable
containing the sequence lengths that will be output by the network.
:param input_length: 1D Tensor
:return: 1D Tensor scaled by model
"""
seq_len = input_length
for m in self.conv.modules():
if type(m) == nn.modules.conv.Conv2d:
seq_len = seq_len + 2 * m.padding[1] - m.dilation[1] * (m.kernel_size[1] - 1) - 1
seq_len = seq_len.true_divide(m.stride[1]) + 1
return seq_len.int()
# Taken from https://github.com/pytorch/examples/blob/master/word_language_model/model.py#L108-L152
class PositionalEncoding(nn.Module):
r"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
\text{PosEncoder}(pos, 2i) = \sin(pos / 10000^{2i / d_{model}})
\text{PosEncoder}(pos, 2i+1) = \cos(pos / 10000^{2i / d_{model}})
\text{where pos is the word position and i is the embed idx}
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
class TransformerModel(nn.Module):
"""Container module with an encoder, a recurrent or transformer module, and a decoder."""
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerModel, self).__init__()
try:
from torch.nn import TransformerEncoder, TransformerEncoderLayer
except Exception:
raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
# Not sure how this works in the original code
# nn.init.zeros_(self.decoder)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, src, has_mask=True):
if has_mask:
device = src.device
# This will be created once during warmup
if self.src_mask is None or self.src_mask.size(0) != len(src):
mask = nn.Transformer.generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
else:
self.src_mask = None
src = self.encoder(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, self.src_mask)
output = self.decoder(output)
return F.log_softmax(output, dim=-1)
# From https://github.com/pytorch/text/blob/master/torchtext/modules
class MultiheadAttentionContainer(torch.nn.Module):
def __init__(self, nhead, in_proj_container, attention_layer, out_proj):
r""" A multi-head attention container
Args:
nhead: the number of heads in the multiheadattention model
in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear).
attention_layer: The attention layer.
out_proj: The multi-head out-projection layer (a.k.a nn.Linear).
Examples::
>>> import torch
>>> embed_dim, num_heads, bsz = 10, 5, 64
>>> in_proj_container = InProjContainer(torch.nn.Linear(embed_dim, embed_dim),
torch.nn.Linear(embed_dim, embed_dim),
torch.nn.Linear(embed_dim, embed_dim))
>>> MHA = MultiheadAttentionContainer(num_heads,
in_proj_container,
ScaledDotProduct(),
torch.nn.Linear(embed_dim, embed_dim))
>>> query = torch.rand((21, bsz, embed_dim))
>>> key = value = torch.rand((16, bsz, embed_dim))
>>> attn_output, attn_weights = MHA(query, key, value)
>>> print(attn_output.shape)
torch.Size([21, 64, 10])
"""
super(MultiheadAttentionContainer, self).__init__()
self.nhead = nhead
self.in_proj_container = in_proj_container
self.attention_layer = attention_layer
self.out_proj = out_proj
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
bias_k: Optional[torch.Tensor] = None,
bias_v: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
r"""
Args:
query, key, value (Tensor): map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
attn_mask, bias_k and bias_v (Tensor, optional): keyword arguments passed to the attention layer.
See the definitions in the attention.
Shape:
- Inputs:
- query: :math:`(L, N, E)`
- key: :math:`(S, N, E)`
- value: :math:`(S, N, E)`
- attn_mask, bias_k and bias_v: same with the shape of the corresponding args in attention layer.
- Outputs:
- attn_output: :math:`(L, N, E)`
- attn_output_weights: :math:`(N * H, L, S)`
where L is the target length, S is the sequence length, H is the number of attention heads,
N is the batch size, and E is the embedding dimension.
"""
tgt_len, src_len, bsz, embed_dim = query.size(-3), key.size(-3), query.size(-2), query.size(-1)
q, k, v = self.in_proj_container(query, key, value)
assert q.size(-1) % self.nhead == 0, "query's embed_dim must be divisible by the number of heads"
head_dim = q.size(-1) // self.nhead
q = q.reshape(tgt_len, bsz * self.nhead, head_dim)
assert k.size(-1) % self.nhead == 0, "key's embed_dim must be divisible by the number of heads"
head_dim = k.size(-1) // self.nhead
k = k.reshape(src_len, bsz * self.nhead, head_dim)
assert v.size(-1) % self.nhead == 0, "value's embed_dim must be divisible by the number of heads"
head_dim = v.size(-1) // self.nhead
v = v.reshape(src_len, bsz * self.nhead, head_dim)
attn_output, attn_output_weights = self.attention_layer(q, k, v, attn_mask=attn_mask,
bias_k=bias_k, bias_v=bias_v)
attn_output = attn_output.reshape(tgt_len, bsz, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_output_weights
class ScaledDotProduct(torch.nn.Module):
def __init__(self, dropout=0.0):
r"""Processes a projected query and key-value pair to apply
scaled dot product attention.
Args:
dropout (float): probability of dropping an attention weight.
Examples::
>>> SDP = torchtext.models.ScaledDotProduct(0.1)
>>> q = torch.randn(256, 21, 3)
>>> k = v = torch.randn(256, 21, 3)
>>> attn_output, attn_weights = SDP(q, k, v)
>>> print(attn_output.shape, attn_weights.shape)
torch.Size([256, 21, 3]) torch.Size([256, 21, 21])
"""
super(ScaledDotProduct, self).__init__()
self.dropout = dropout
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
bias_k: Optional[torch.Tensor] = None,
bias_v: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Uses a scaled dot product with the projected key-value pair to update
the projected query.
Args:
query (Tensor): Projected query
key (Tensor): Projected key
value (Tensor): Projected value
attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions.
bias_k and bias_v: (Tensor, optional): one more key and value sequence to be added at
sequence dim (dim=-3). Those are used for incremental decoding. Users should provide
non-None to both arguments in order to activate them.
Shape:
- query: :math:`(L, N * H, E / H)`
- key: :math:`(S, N * H, E / H)`
- value: :math:`(S, N * H, E / H)`
- attn_mask: :math:`(N * H, L, S)`, positions with ``True`` are not allowed to attend
while ``False`` values will be unchanged.
- bias_k and bias_v: :math:`(1, N * H, E / H)`
- Output: :math:`(L, N * H, E / H)`, :math:`(N * H, L, S)`
where L is the target length, S is the source length, H is the number
of attention heads, N is the batch size, and E is the embedding dimension.
"""
if bias_k is not None and bias_v is not None:
assert key.size(-1) == bias_k.size(-1) and key.size(-2) == bias_k.size(-2) and bias_k.size(-3) == 1, \
"Shape of bias_k is not supported"
assert value.size(-1) == bias_v.size(-1) and value.size(-2) == bias_v.size(-2) and bias_v.size(-3) == 1, \
"Shape of bias_v is not supported"
key = torch.cat([key, bias_k])
value = torch.cat([value, bias_v])
if attn_mask is not None:
_attn_mask = attn_mask
attn_mask = torch.nn.functional.pad(_attn_mask, [0, 1])
tgt_len, head_dim = query.size(-3), query.size(-1)
assert query.size(-1) == key.size(-1) == value.size(-1), "The feature dim of query, key, value must be equal."
assert key.size() == value.size(), "Shape of key, value must match"
src_len = key.size(-3)
batch_heads = max(query.size(-2), key.size(-2))
# Scale query
query, key, value = query.transpose(-2, -3), key.transpose(-2, -3), value.transpose(-2, -3)
query = query * (float(head_dim) ** -0.5)
if attn_mask is not None:
if attn_mask.dim() != 3:
raise RuntimeError('attn_mask must be a 3D tensor.')
if (attn_mask.size(-1) != src_len) or (attn_mask.size(-2) != tgt_len) or \
(attn_mask.size(-3) != 1 and attn_mask.size(-3) != batch_heads):
raise RuntimeError('The size of the attn_mask is not correct.')
if attn_mask.dtype != torch.bool:
raise RuntimeError('Only bool tensor is supported for attn_mask')
# Dot product of q, k
attn_output_weights = torch.matmul(query, key.mT)
if attn_mask is not None:
attn_output_weights.masked_fill_(attn_mask, -1e8,)
attn_output_weights = torch.nn.functional.softmax(attn_output_weights, dim=-1)
attn_output_weights = torch.nn.functional.dropout(attn_output_weights, p=self.dropout, training=self.training)
attn_output = torch.matmul(attn_output_weights, value)
return attn_output.transpose(-2, -3), attn_output_weights
class InProjContainer(torch.nn.Module):
def __init__(self, query_proj, key_proj, value_proj):
r"""A in-proj container to process inputs.
Args:
query_proj: a proj layer for query.
key_proj: a proj layer for key.
value_proj: a proj layer for value.
"""
super(InProjContainer, self).__init__()
self.query_proj = query_proj
self.key_proj = key_proj
self.value_proj = value_proj
def forward(self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
r"""Projects the input sequences using in-proj layers.
Args:
query, key, value (Tensors): sequence to be projected
Shape:
- query, key, value: :math:`(S, N, E)`
- Output: :math:`(S, N, E)`
where S is the sequence length, N is the batch size, and E is the embedding dimension.
"""
return self.query_proj(query), self.key_proj(key), self.value_proj(value)
| pytorch-master | benchmarks/functional_autograd_benchmark/torchaudio_models.py |
import torch
from torch import Tensor
import torchvision_models as models
from utils import check_for_functorch, extract_weights, load_weights, GetterReturnType
from typing import cast
has_functorch = check_for_functorch()
def get_resnet18(device: torch.device) -> GetterReturnType:
N = 32
model = models.resnet18(pretrained=False)
if has_functorch:
from functorch.experimental import replace_all_batch_norm_modules_
replace_all_batch_norm_modules_(model)
criterion = torch.nn.CrossEntropyLoss()
model.to(device)
params, names = extract_weights(model)
inputs = torch.rand([N, 3, 224, 224], device=device)
labels = torch.rand(N, device=device).mul(10).long()
def forward(*new_params: Tensor) -> Tensor:
load_weights(model, names, new_params)
out = model(inputs)
loss = criterion(out, labels)
return loss
return forward, params
def get_fcn_resnet(device: torch.device) -> GetterReturnType:
N = 8
criterion = torch.nn.MSELoss()
model = models.fcn_resnet50(pretrained=False, pretrained_backbone=False)
if has_functorch:
from functorch.experimental import replace_all_batch_norm_modules_
replace_all_batch_norm_modules_(model)
# disable dropout for consistency checking
model.eval()
model.to(device)
params, names = extract_weights(model)
inputs = torch.rand([N, 3, 480, 480], device=device)
# Given model has 21 classes
labels = torch.rand([N, 21, 480, 480], device=device)
def forward(*new_params: Tensor) -> Tensor:
load_weights(model, names, new_params)
out = model(inputs)['out']
loss = criterion(out, labels)
return loss
return forward, params
def get_detr(device: torch.device) -> GetterReturnType:
# All values below are from CLI defaults in https://github.com/facebookresearch/detr
N = 2
num_classes = 91
hidden_dim = 256
nheads = 8
num_encoder_layers = 6
num_decoder_layers = 6
model = models.DETR(num_classes=num_classes, hidden_dim=hidden_dim, nheads=nheads,
num_encoder_layers=num_encoder_layers, num_decoder_layers=num_decoder_layers)
if has_functorch:
from functorch.experimental import replace_all_batch_norm_modules_
replace_all_batch_norm_modules_(model)
losses = ['labels', 'boxes', 'cardinality']
eos_coef = 0.1
bbox_loss_coef = 5
giou_loss_coef = 2
weight_dict = {'loss_ce': 1, 'loss_bbox': bbox_loss_coef, 'loss_giou': giou_loss_coef}
matcher = models.HungarianMatcher(1, 5, 2)
criterion = models.SetCriterion(num_classes=num_classes, matcher=matcher, weight_dict=weight_dict,
eos_coef=eos_coef, losses=losses)
model = model.to(device)
criterion = criterion.to(device)
params, names = extract_weights(model)
inputs = torch.rand(N, 3, 800, 1200, device=device)
labels = []
for idx in range(N):
targets = {}
n_targets: int = int(torch.randint(5, 10, size=tuple()).item())
label = torch.randint(5, 10, size=(n_targets,), device=device)
targets["labels"] = label
boxes = torch.randint(100, 800, size=(n_targets, 4), device=device)
for t in range(n_targets):
if boxes[t, 0] > boxes[t, 2]:
boxes[t, 0], boxes[t, 2] = boxes[t, 2], boxes[t, 0]
if boxes[t, 1] > boxes[t, 3]:
boxes[t, 1], boxes[t, 3] = boxes[t, 3], boxes[t, 1]
targets["boxes"] = boxes.float()
labels.append(targets)
def forward(*new_params: Tensor) -> Tensor:
load_weights(model, names, new_params)
out = model(inputs)
loss = criterion(out, labels)
weight_dict = criterion.weight_dict
final_loss = cast(Tensor, sum(loss[k] * weight_dict[k] for k in loss.keys() if k in weight_dict))
return final_loss
return forward, params
| pytorch-master | benchmarks/functional_autograd_benchmark/vision_models.py |
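Each getter in `vision_models.py` above returns a `(forward, params)` pair whose forward closure reloads the parameters into the model before computing a scalar loss. The sketch below shows how such a getter is consumed; it assumes the extracted parameters require grad (which is what the functional autograd benchmark differentiates against) and that this benchmark directory is on `sys.path`.

import torch

from vision_models import get_resnet18

# Editorial sketch, not part of the repository.
forward, params = get_resnet18(torch.device("cpu"))
loss = forward(*params)                    # scalar loss on the random inputs/labels baked into the getter
grads = torch.autograd.grad(loss, params)  # one gradient per extracted weight
print(loss.item(), len(grads))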
import torch
from torch.autograd import functional
import time
from argparse import ArgumentParser
from collections import defaultdict
from typing import NamedTuple, Callable, List, Any
try:
import functorch as ft
has_functorch = True
print(f"Found functorch: {ft.__version__}")
except ImportError:
has_functorch = False
import ppl_models
import vision_models
import audio_text_models
from utils import to_markdown_table, TimingResultType, InputsType, GetterType, VType
def get_task_func(task: str) -> Callable:
def hessian_fwdrev(model, inp, strict=None):
return functional.hessian(model, inp, strict=False, vectorize=True, outer_jacobian_strategy="forward-mode")
def hessian_revrev(model, inp, strict=None):
return functional.hessian(model, inp, strict=False, vectorize=True)
def jacfwd(model, inp, strict=None):
return functional.jacobian(model, inp, strict=False, vectorize=True, strategy="forward-mode")
def jacrev(model, inp, strict=None):
return functional.jacobian(model, inp, strict=False, vectorize=True)
if task == "hessian_fwdrev":
return hessian_fwdrev
elif task == "hessian_revrev":
return hessian_revrev
elif task == "jacfwd":
return jacfwd
elif task == "jacrev":
return jacrev
else:
return getattr(functional, task)
def get_task_functorch(task: str) -> Callable:
@torch.no_grad()
def vjp(model, inp, v=None, strict=None):
assert v is not None
out, vjpfunc = ft.vjp(model, *inp)
return out, vjpfunc(v)
@torch.no_grad()
def jvp(model, inp, v=None, strict=None):
assert v is not None
return ft.jvp(model, inp, v)
@torch.no_grad()
def vhp(model, inp, v=None, strict=None):
assert v is not None
argnums = tuple(range(len(inp)))
_, vjpfunc, aux = ft.vjp(ft.grad_and_value(model, argnums), *inp, has_aux=True)
return aux, vjpfunc(v)
@torch.no_grad()
def hvp(model, inp, v=None, strict=None):
assert v is not None
argnums = tuple(range(len(inp)))
_, hvp_out, aux = ft.jvp(ft.grad_and_value(model, argnums), inp, v, has_aux=True)
return aux, hvp_out
@torch.no_grad()
def jacfwd(model, inp, v=None, strict=None):
argnums = tuple(range(len(inp)))
return ft.jacfwd(model, argnums)(*inp)
@torch.no_grad()
def jacrev(model, inp, v=None, strict=None):
argnums = tuple(range(len(inp)))
return ft.jacrev(model, argnums)(*inp)
@torch.no_grad()
def hessian(model, inp, v=None, strict=None):
argnums = tuple(range(len(inp)))
return ft.hessian(model, argnums=argnums)(*inp)
@torch.no_grad()
def hessian_fwdrev(model, inp, v=None, strict=None):
argnums = tuple(range(len(inp)))
return ft.jacfwd(ft.jacrev(model, argnums=argnums), argnums=argnums)(*inp)
@torch.no_grad()
def hessian_revrev(model, inp, v=None, strict=None):
argnums = tuple(range(len(inp)))
return ft.jacrev(ft.jacrev(model, argnums=argnums), argnums=argnums)(*inp)
if task in locals():
return locals()[task]
elif task == "jacobian":
raise RuntimeError("functorch has no equivalent of autograd.functional.jacobian with vectorize=False yet")
else:
raise RuntimeError(f"Unsupported task: {task}")
# Listing of the different tasks
FAST_TASKS_NO_DOUBLE_BACK = [
"vjp",
]
FAST_TASKS = FAST_TASKS_NO_DOUBLE_BACK + [
"vhp",
"jvp",
]
ALL_TASKS_NON_VECTORIZED = FAST_TASKS + [
"hvp",
"jacobian",
"hessian"
]
DOUBLE_BACKWARD_TASKS = ["jvp", "hvp", "vhp", "hessian"]
VECTORIZED_TASKS = ["hessian_fwdrev", "hessian_revrev", "jacfwd", "jacrev"]
ALL_TASKS = ALL_TASKS_NON_VECTORIZED + VECTORIZED_TASKS
# Model definition which contains:
# - name: a string with the model name.
# - getter: a function to get the model. It takes as input the device on which the model
# will run. It should return the forward function and the parameters (Tensors) used as
# input for the forward function. Note that the forward must *not* have any side effect.
# - tasks: the list of recommended tasks that can run in a reasonable amount of time with this model.
# - unsupported: the list of tasks that this model cannot run.
class ModelDef(NamedTuple):
name: str
getter: GetterType
tasks: List[str]
unsupported: List[str]
MODELS = [
ModelDef("resnet18", vision_models.get_resnet18, FAST_TASKS, []),
ModelDef("fcn_resnet", vision_models.get_fcn_resnet, FAST_TASKS, []),
ModelDef("detr", vision_models.get_detr, FAST_TASKS, []),
ModelDef("ppl_simple_reg", ppl_models.get_simple_regression, ALL_TASKS, []),
ModelDef("ppl_robust_reg", ppl_models.get_robust_regression, ALL_TASKS, []),
ModelDef("wav2letter", audio_text_models.get_wav2letter, FAST_TASKS, []),
ModelDef("deepspeech", audio_text_models.get_deepspeech, FAST_TASKS_NO_DOUBLE_BACK, DOUBLE_BACKWARD_TASKS),
ModelDef("transformer", audio_text_models.get_transformer, FAST_TASKS, []),
ModelDef("multiheadattn", audio_text_models.get_multiheadattn, FAST_TASKS, []),
]
def get_v_for(model: Callable, inp: InputsType, task: str) -> VType:
v: VType
if task in ["vjp"]:
out = model(*inp)
v = torch.rand_like(out)
elif task in ["jvp", "hvp", "vhp"]:
if isinstance(inp, tuple):
v = tuple(torch.rand_like(i) for i in inp)
else:
v = torch.rand_like(inp)
else:
v = None
return v
def run_once(model: Callable, inp: InputsType, task: str, v: VType, **kwargs) -> None:
func = get_task_func(task)
if v is not None:
res = func(model, inp, v=v, strict=True)
else:
res = func(model, inp, strict=True)
def run_once_functorch(model: Callable, inp: InputsType, task: str, v: VType, maybe_check_consistency=False) -> None:
func = get_task_functorch(task)
if v is not None:
res = func(model, inp, v=v, strict=True)
else:
res = func(model, inp, strict=True)
if maybe_check_consistency:
af_func = get_task_func(task)
if v is not None:
expected = af_func(model, inp, v=v, strict=True)
else:
expected = af_func(model, inp, strict=True)
atol = 1e-2 if task == "vhp" else 5e-3
torch.testing.assert_close(res, expected, rtol=1e-5, atol=atol, msg=f"Consistency fail for task '{task}'")
def run_model(model_getter: GetterType, args: Any, task: str, run_once_fn: Callable = run_once) -> List[float]:
if args.gpu == -1:
device = torch.device("cpu")
def noop():
pass
do_sync = noop
else:
device = torch.device("cuda:{}".format(args.gpu))
do_sync = torch.cuda.synchronize
model, inp = model_getter(device)
v = get_v_for(model, inp, task)
# Warmup
# maybe_check_consistency=True checks for consistency between
# functorch vs autograd.functional and is done in run_once_functorch only
run_once_fn(model, inp, task, v, maybe_check_consistency=True)
elapsed = []
for it in range(args.num_iters):
do_sync()
start = time.time()
run_once_fn(model, inp, task, v)
do_sync()
elapsed.append(time.time() - start)
return elapsed
def main():
parser = ArgumentParser("Main script to benchmark functional API of the autograd.")
parser.add_argument("--output", type=str, default="", help="Text file where to write the output")
parser.add_argument("--num-iters", type=int, default=10)
parser.add_argument("--gpu", type=int, default=-2, help="GPU to use, -1 for CPU and -2 for auto-detect")
parser.add_argument("--run-slow-tasks", action="store_true", help="Run even the slow tasks")
parser.add_argument("--model-filter", type=str, default="", help="Only run the models in this filter")
parser.add_argument("--task-filter", type=str, default="", help="Only run the tasks in this filter")
parser.add_argument("--num-threads", type=int, default=10,
help="Number of concurrent threads to use when running on cpu")
parser.add_argument("--seed", type=int, default=0, help="The random seed to use.")
args = parser.parse_args()
results: TimingResultType = defaultdict(defaultdict)
torch.set_num_threads(args.num_threads)
torch.set_num_interop_threads(args.num_threads)
# This automatically seeds CUDA if it is available
torch.manual_seed(args.seed)
if args.gpu == -2:
args.gpu = 0 if torch.cuda.is_available() else -1
for name, model_getter, recommended_tasks, unsupported_tasks in MODELS:
if args.model_filter and name not in args.model_filter:
continue
tasks = ALL_TASKS if args.run_slow_tasks else recommended_tasks
for task in tasks:
if task in unsupported_tasks:
continue
if args.task_filter and task not in args.task_filter:
continue
runtimes = run_model(model_getter, args, task)
runtimes = torch.tensor(runtimes)
mean, var = runtimes.mean(), runtimes.var()
results[name][task] = (mean.item(), var.item())
print("Results for model {} on task {}: {}s (var: {})".format(name, task, mean, var))
if has_functorch:
try:
runtimes = run_model(model_getter, args, task, run_once_fn=run_once_functorch)
except RuntimeError as e:
print(f"Failed model using Functorch: {name}, task: {task}, Error message: \n\t", e)
continue
runtimes = torch.tensor(runtimes)
mean, var = runtimes.mean(), runtimes.var()
results[name][f"functorch {task}"] = (mean.item(), var.item())
print("Results for model {} on task {} using Functorch: {}s (var: {})".format(name, task, mean, var))
if args.output:
with open(args.output, "w") as f:
f.write(to_markdown_table(results))
if __name__ == "__main__":
main()
| pytorch-master | benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py |
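For reference, the "vjp" task that `run_once` above times reduces, for a toy model, to a single call into `torch.autograd.functional`; the stand-in model below only illustrates the call pattern and is not one of the benchmark's models.

import torch
from torch.autograd import functional

# Editorial sketch, not part of the repository.
def model(w: torch.Tensor) -> torch.Tensor:
    return (w * w).sum(dim=0)  # stand-in for a benchmark model's forward/loss

inp = (torch.rand(3, 4),)
v = torch.rand(4)  # cotangent for the output, as get_v_for builds for "vjp"
out, vjp_result = functional.vjp(model, inp, v=v, strict=True)
print(out.shape, vjp_result[0].shape)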
# Taken from https://github.com/pytorch/vision
# So that we don't need torchvision to be installed
import torch
from torch import nn
from torch.nn import functional as F
from torch.jit.annotations import Dict
from collections import OrderedDict
try:
from scipy.optimize import linear_sum_assignment
scipy_available = True
except Exception:
scipy_available = False
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
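# Shape sketch (illustrative, hypothetical input): a BasicBlock with matching
# in/out planes and stride 1 preserves both the channel count and the spatial size.
#
#     block = BasicBlock(inplanes=64, planes=64)
#     y = block(torch.rand(1, 64, 56, 56))  # y.shape == (1, 64, 56, 56)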
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
# if pretrained:
# state_dict = load_state_dict_from_url(model_urls[arch],
# progress=progress)
# model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
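# Illustrative usage (hypothetical batch size): both constructors return a plain
# torch.nn.Module that maps images to ImageNet-style logits.
#
#     model = resnet18(num_classes=1000)
#     logits = model(torch.rand(2, 3, 224, 224))  # logits.shape == (2, 1000)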
class IntermediateLayerGetter(nn.ModuleDict):
"""
Module wrapper that returns intermediate layers from a model
It has a strong assumption that the modules have been registered
into the model in the same order as they are used.
This means that one should **not** reuse the same nn.Module
twice in the forward if you want this to work.
Additionally, it is only able to query submodules that are directly
assigned to the model. So if `model` is passed, `model.feature1` can
be returned, but not `model.feature1.layer2`.
Args:
model (nn.Module): model on which we will extract the features
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
Examples::
>>> m = torchvision.models.resnet18(pretrained=True)
        >>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
>>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
>>> {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = new_m(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
"""
_version = 2
__annotations__ = {
"return_layers": Dict[str, str],
}
def __init__(self, model, return_layers):
if not set(return_layers).issubset([name for name, _ in model.named_children()]):
raise ValueError("return_layers are not present in model")
orig_return_layers = return_layers
return_layers = {str(k): str(v) for k, v in return_layers.items()}
layers = OrderedDict()
for name, module in model.named_children():
layers[name] = module
if name in return_layers:
del return_layers[name]
if not return_layers:
break
super(IntermediateLayerGetter, self).__init__(layers)
self.return_layers = orig_return_layers
def forward(self, x):
out = OrderedDict()
for name, module in self.items():
x = module(x)
if name in self.return_layers:
out_name = self.return_layers[name]
out[out_name] = x
return out
class _SimpleSegmentationModel(nn.Module):
__constants__ = ['aux_classifier']
def __init__(self, backbone, classifier, aux_classifier=None):
super(_SimpleSegmentationModel, self).__init__()
self.backbone = backbone
self.classifier = classifier
self.aux_classifier = aux_classifier
def forward(self, x):
input_shape = x.shape[-2:]
# contract: features is a dict of tensors
features = self.backbone(x)
result = OrderedDict()
x = features["out"]
x = self.classifier(x)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
result["out"] = x
if self.aux_classifier is not None:
x = features["aux"]
x = self.aux_classifier(x)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
result["aux"] = x
return result
class FCN(_SimpleSegmentationModel):
"""
Implements a Fully-Convolutional Network for semantic segmentation.
Args:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
class FCNHead(nn.Sequential):
def __init__(self, in_channels, channels):
inter_channels = in_channels // 4
layers = [
nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU(),
nn.Dropout(0.1),
nn.Conv2d(inter_channels, channels, 1)
]
super(FCNHead, self).__init__(*layers)
def _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True):
# backbone = resnet.__dict__[backbone_name](
# pretrained=pretrained_backbone,
# replace_stride_with_dilation=[False, True, True])
# Hardcoded resnet 50
assert backbone_name == "resnet50"
backbone = resnet50(
pretrained=pretrained_backbone,
replace_stride_with_dilation=[False, True, True])
return_layers = {'layer4': 'out'}
if aux:
return_layers['layer3'] = 'aux'
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
aux_classifier = None
if aux:
inplanes = 1024
aux_classifier = FCNHead(inplanes, num_classes)
model_map = {
# 'deeplabv3': (DeepLabHead, DeepLabV3), # Not used
'fcn': (FCNHead, FCN),
}
inplanes = 2048
classifier = model_map[name][0](inplanes, num_classes)
base_model = model_map[name][1]
model = base_model(backbone, classifier, aux_classifier)
return model
def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):
if pretrained:
aux_loss = True
model = _segm_resnet(arch_type, backbone, num_classes, aux_loss, **kwargs)
# if pretrained:
# arch = arch_type + '_' + backbone + '_coco'
# model_url = model_urls[arch]
# if model_url is None:
# raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
# else:
# state_dict = load_state_dict_from_url(model_url, progress=progress)
# model.load_state_dict(state_dict)
return model
def fcn_resnet50(pretrained=False, progress=True,
num_classes=21, aux_loss=None, **kwargs):
"""Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
contains the same classes as Pascal VOC
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)
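# Illustrative usage (hypothetical input size): the segmentation model returns an
# OrderedDict; "out" holds the dense prediction upsampled back to the input size,
# and "aux" is only present when aux_loss is enabled.
#
#     model = fcn_resnet50(num_classes=21, aux_loss=True)
#     result = model(torch.rand(1, 3, 224, 224))
#     result["out"].shape  # (1, 21, 224, 224)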
# Taken from @fmassa example slides and https://github.com/facebookresearch/detr
class DETR(nn.Module):
"""
Demo DETR implementation.
Demo implementation of DETR in minimal number of lines, with the
following differences wrt DETR in the paper:
* learned positional encoding (instead of sine)
* positional encoding is passed at input (instead of attention)
* fc bbox predictor (instead of MLP)
The model achieves ~40 AP on COCO val5k and runs at ~28 FPS on Tesla V100.
Only batch size 1 supported.
"""
def __init__(self, num_classes, hidden_dim=256, nheads=8,
num_encoder_layers=6, num_decoder_layers=6):
super().__init__()
# create ResNet-50 backbone
self.backbone = resnet50()
del self.backbone.fc
# create conversion layer
self.conv = nn.Conv2d(2048, hidden_dim, 1)
# create a default PyTorch transformer
self.transformer = nn.Transformer(
hidden_dim, nheads, num_encoder_layers, num_decoder_layers)
# prediction heads, one extra class for predicting non-empty slots
# note that in baseline DETR linear_bbox layer is 3-layer MLP
self.linear_class = nn.Linear(hidden_dim, num_classes + 1)
self.linear_bbox = nn.Linear(hidden_dim, 4)
# output positional encodings (object queries)
self.query_pos = nn.Parameter(torch.rand(100, hidden_dim))
# spatial positional encodings
# note that in baseline DETR we use sine positional encodings
self.row_embed = nn.Parameter(torch.rand(50, hidden_dim // 2))
self.col_embed = nn.Parameter(torch.rand(50, hidden_dim // 2))
def forward(self, inputs):
# propagate inputs through ResNet-50 up to avg-pool layer
x = self.backbone.conv1(inputs)
x = self.backbone.bn1(x)
x = self.backbone.relu(x)
x = self.backbone.maxpool(x)
x = self.backbone.layer1(x)
x = self.backbone.layer2(x)
x = self.backbone.layer3(x)
x = self.backbone.layer4(x)
# convert from 2048 to 256 feature planes for the transformer
h = self.conv(x)
# construct positional encodings
H, W = h.shape[-2:]
pos = torch.cat([
self.col_embed[:W].unsqueeze(0).repeat(H, 1, 1),
self.row_embed[:H].unsqueeze(1).repeat(1, W, 1),
], dim=-1).flatten(0, 1).unsqueeze(1)
# propagate through the transformer
# TODO (alband) Why this is not automatically broadcasted? (had to add the repeat)
f = pos + 0.1 * h.flatten(2).permute(2, 0, 1)
s = self.query_pos.unsqueeze(1)
s = s.expand(s.size(0), inputs.size(0), s.size(2))
h = self.transformer(f, s).transpose(0, 1)
# finally project transformer outputs to class labels and bounding boxes
return {'pred_logits': self.linear_class(h),
'pred_boxes': self.linear_bbox(h).sigmoid()}
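# Illustrative usage (hypothetical class count; batch size must be 1 as noted in
# the docstring): with the 100 learned object queries above, the demo DETR
# produces per-query class logits and normalized boxes.
#
#     detr = DETR(num_classes=91)
#     out = detr(torch.rand(1, 3, 800, 800))
#     out['pred_logits'].shape  # (1, 100, 92)
#     out['pred_boxes'].shape   # (1, 100, 4), values in [0, 1] after the sigmoid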
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
    # degenerate boxes give inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / area
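# Worked example (illustrative): for boxes1 = [[0, 0, 1, 1]] and boxes2 = [[2, 0, 3, 1]]
# (xyxy format), the IoU is 0, the union area is 2 and the smallest enclosing box has
# area 3, so the generalized IoU is 0 - (3 - 2) / 3 = -1/3. Identical boxes give 1.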
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_area(boxes):
"""
Computes the area of a set of bounding boxes, which are specified by its
(x1, y1, x2, y2) coordinates.
Args:
boxes (Tensor[N, 4]): boxes for which the area will be computed. They
are expected to be in (x1, y1, x2, y2) format
Returns:
area (Tensor[N]): area for each box
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def is_dist_avail_and_initialized():
return False
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class SetCriterion(nn.Module):
""" This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
""" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
assert 'pred_boxes' in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(generalized_box_iou(
box_cxcywh_to_xyxy(src_boxes),
box_cxcywh_to_xyxy(target_boxes)))
losses['loss_giou'] = loss_giou.sum() / num_boxes
return losses
def loss_masks(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list([t["masks"] for t in targets]).decompose()
target_masks = target_masks.to(src_masks)
src_masks = src_masks[src_idx]
# upsample predictions to the target size
src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
mode="bilinear", align_corners=False)
src_masks = src_masks[:, 0].flatten(1)
target_masks = target_masks[tgt_idx].flatten(1)
losses = {
"loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
"loss_dice": dice_loss(src_masks, target_masks, num_boxes),
}
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
'labels': self.loss_labels,
'cardinality': self.loss_cardinality,
'boxes': self.loss_boxes,
'masks': self.loss_masks
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
# Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if loss == 'masks':
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs can't be 0"
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
        # but approximate it as 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, so it can be omitted.
cost_class = -out_prob[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
        # Compute the giou cost between boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
if not scipy_available:
raise RuntimeError("The 'detr' model requires scipy to run. Please make sure you have it installed"
" if you enable the 'detr' model.")
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
| pytorch-master | benchmarks/functional_autograd_benchmark/torchvision_models.py |
import torch
from collections import defaultdict
from torch import nn, Tensor
from typing import List, Tuple, Dict, Union, Callable
# Type helpers
InputsType = Union[Tensor, Tuple[Tensor, ...]]
# A Getter takes in a device and returns a callable and the inputs to that callable
GetterReturnType = Tuple[Callable[..., Tensor], InputsType]
GetterType = Callable[[torch.device], GetterReturnType]
# V here refers to the v in either vjp, jvp, vhp or hvp
VType = Union[None, Tensor, Tuple[Tensor, ...]]
# Type used to store timing results. The first key is the model name, the second key
# is the task name, and the value is a tuple of timing statistics, e.g. (mean, var)
# for a single run or (speedup, mean_before, var_before, mean_after, var_after) when comparing two runs.
TimingResultType = Dict[str, Dict[str, Tuple[float, ...]]]
# Utilities to make nn.Module "functional"
# In particular the goal is to be able to provide a function that takes as input
# the parameters and evaluate the nn.Module using fixed inputs.
def _del_nested_attr(obj: nn.Module, names: List[str]) -> None:
"""
Deletes the attribute specified by the given list of names.
For example, to delete the attribute obj.conv.weight,
use _del_nested_attr(obj, ['conv', 'weight'])
"""
if len(names) == 1:
delattr(obj, names[0])
else:
_del_nested_attr(getattr(obj, names[0]), names[1:])
def _set_nested_attr(obj: nn.Module, names: List[str], value: Tensor) -> None:
"""
Set the attribute specified by the given list of names to value.
For example, to set the attribute obj.conv.weight,
    use _set_nested_attr(obj, ['conv', 'weight'], value)
"""
if len(names) == 1:
setattr(obj, names[0], value)
else:
_set_nested_attr(getattr(obj, names[0]), names[1:], value)
def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]:
"""
This function removes all the Parameters from the model and
return them as a tuple as well as their original attribute names.
The weights must be re-loaded with `load_weights` before the model
can be used again.
Note that this function modifies the model in place and after this
call, mod.parameters() will be empty.
"""
orig_params = tuple(mod.parameters())
# Remove all the parameters in the model
names = []
for name, p in list(mod.named_parameters()):
_del_nested_attr(mod, name.split("."))
names.append(name)
# Make params regular Tensors instead of nn.Parameter
params = tuple(p.detach().requires_grad_() for p in orig_params)
return params, names
def load_weights(mod: nn.Module, names: List[str], params: Tuple[Tensor, ...]) -> None:
"""
Reload a set of weights so that `mod` can be used again to perform a forward pass.
Note that the `params` are regular Tensors (that can have history) and so are left
as Tensors. This means that mod.parameters() will still be empty after this call.
"""
for name, p in zip(names, params):
_set_nested_attr(mod, name.split("."), p)
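# Sketch of the intended workflow (illustrative; `model` and `inputs` are
# hypothetical): the extracted parameters become the free variables of a pure
# function that torch.autograd.functional can differentiate.
#
#     params, names = extract_weights(model)
#     def forward(*new_params):
#         load_weights(model, names, new_params)
#         return model(inputs).sum()
#     out, vjps = torch.autograd.functional.vjp(forward, params)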
# Utilities to read/write markdown table-like content.
def to_markdown_table(res: TimingResultType, header: Tuple[str, ...] = None) -> str:
if header is None:
header = ("model", "task", "mean", "var")
out = ""
def write_line(*args):
nonlocal out
out += "| {} |\n".format(" | ".join(str(a) for a in args))
# Make it a markdown table
write_line(*header)
write_line(*["--"] * len(header))
for model, tasks in res.items():
for task, line in tasks.items():
write_line(*(model, task) + line)
return out
def from_markdown_table(data: str) -> TimingResultType:
out = data.strip().split("\n")
out = out[2:] # Ignore the header lines
res: TimingResultType
res = defaultdict(defaultdict)
for line in out:
model, task, mean, var = [f.strip() for f in line.strip().split("|") if f]
res[model][task] = (float(mean), float(var))
return res
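# Round-trip example (illustrative values): to_markdown_table and
# from_markdown_table are inverses for the default (mean, var) layout.
#
#     res = {"resnet18": {"vjp": (0.12, 0.0001)}}
#     table = to_markdown_table(res)  # "| model | task | mean | var |\n| -- | ..."
#     from_markdown_table(table)      # {"resnet18": {"vjp": (0.12, 0.0001)}}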
def check_for_functorch():
try:
import functorch # noqa: F401
return True
except ImportError:
return False
| pytorch-master | benchmarks/functional_autograd_benchmark/utils.py |
import torch
from torch import Tensor
import torch.distributions as dist
from utils import GetterReturnType
def get_simple_regression(device: torch.device) -> GetterReturnType:
N = 10
K = 10
loc_beta = 0.
scale_beta = 1.
beta_prior = dist.Normal(loc_beta, scale_beta)
X = torch.rand(N, K + 1, device=device)
Y = torch.rand(N, 1, device=device)
# X.shape: (N, K + 1), Y.shape: (N, 1), beta_value.shape: (K + 1, 1)
beta_value = beta_prior.sample((K + 1, 1))
beta_value.requires_grad_(True)
def forward(beta_value: Tensor) -> Tensor:
mu = X.mm(beta_value)
# We need to compute the first and second gradient of this score with respect
# to beta_value. We disable Bernoulli validation because Y is a relaxed value.
score = (dist.Bernoulli(logits=mu, validate_args=False).log_prob(Y).sum() +
beta_prior.log_prob(beta_value).sum())
return score
return forward, (beta_value.to(device),)
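# Illustrative sketch (not part of the benchmark itself): the returned closure and
# inputs are what the harness hands to torch.autograd.functional, e.g.
#
#     forward, (beta,) = get_simple_regression(torch.device("cpu"))
#     hess = torch.autograd.functional.hessian(forward, beta)  # shape (K + 1, 1, K + 1, 1)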
def get_robust_regression(device: torch.device) -> GetterReturnType:
N = 10
K = 10
# X.shape: (N, K + 1), Y.shape: (N, 1)
X = torch.rand(N, K + 1, device=device)
Y = torch.rand(N, 1, device=device)
# Predefined nu_alpha and nu_beta, nu_alpha.shape: (1, 1), nu_beta.shape: (1, 1)
nu_alpha = torch.rand(1, 1, device=device)
nu_beta = torch.rand(1, 1, device=device)
nu = dist.Gamma(nu_alpha, nu_beta)
# Predefined sigma_rate: sigma_rate.shape: (N, 1)
sigma_rate = torch.rand(N, 1, device=device)
sigma = dist.Exponential(sigma_rate)
# Predefined beta_mean and beta_sigma: beta_mean.shape: (K + 1, 1), beta_sigma.shape: (K + 1, 1)
beta_mean = torch.rand(K + 1, 1, device=device)
beta_sigma = torch.rand(K + 1, 1, device=device)
beta = dist.Normal(beta_mean, beta_sigma)
nu_value = nu.sample()
nu_value.requires_grad_(True)
sigma_value = sigma.sample()
sigma_unconstrained_value = sigma_value.log()
sigma_unconstrained_value.requires_grad_(True)
beta_value = beta.sample()
beta_value.requires_grad_(True)
def forward(nu_value: Tensor, sigma_unconstrained_value: Tensor, beta_value: Tensor) -> Tensor:
sigma_constrained_value = sigma_unconstrained_value.exp()
mu = X.mm(beta_value)
# For this model, we need to compute the following three scores:
# We need to compute the first and second gradient of this score with respect
# to nu_value.
nu_score = dist.StudentT(nu_value, mu, sigma_constrained_value).log_prob(Y).sum() \
+ nu.log_prob(nu_value)
# We need to compute the first and second gradient of this score with respect
# to sigma_unconstrained_value.
sigma_score = dist.StudentT(nu_value, mu, sigma_constrained_value).log_prob(Y).sum() \
+ sigma.log_prob(sigma_constrained_value) \
+ sigma_unconstrained_value
# We need to compute the first and second gradient of this score with respect
# to beta_value.
beta_score = dist.StudentT(nu_value, mu, sigma_constrained_value).log_prob(Y).sum() \
+ beta.log_prob(beta_value)
return nu_score.sum() + sigma_score.sum() + beta_score.sum()
return forward, (nu_value.to(device), sigma_unconstrained_value.to(device), beta_value.to(device))
| pytorch-master | benchmarks/functional_autograd_benchmark/ppl_models.py |
import torch
from torch import nn, Tensor
import torchaudio_models as models
from utils import check_for_functorch, extract_weights, load_weights, GetterReturnType
has_functorch = check_for_functorch()
def get_wav2letter(device: torch.device) -> GetterReturnType:
N = 10
input_frames = 700
vocab_size = 28
model = models.Wav2Letter(num_classes=vocab_size)
criterion = torch.nn.NLLLoss()
model.to(device)
params, names = extract_weights(model)
inputs = torch.rand([N, 1, input_frames], device=device)
labels = torch.rand(N, 3, device=device).mul(vocab_size).long()
def forward(*new_params: Tensor) -> Tensor:
load_weights(model, names, new_params)
out = model(inputs)
loss = criterion(out, labels)
return loss
return forward, params
def get_deepspeech(device: torch.device) -> GetterReturnType:
sample_rate = 16000
window_size = 0.02
window = "hamming"
audio_conf = dict(sample_rate=sample_rate,
window_size=window_size,
window=window,
noise_dir=None)
N = 10
num_classes = 10
spectrogram_size = 161
# Commented are the original sizes in the code
seq_length = 500 # 1343
target_length = 10 # 50
labels = torch.rand(num_classes, device=device)
inputs = torch.rand(N, 1, spectrogram_size, seq_length, device=device)
# Sequence length for each input
inputs_sizes = torch.rand(N, device=device).mul(seq_length * 0.1).add(seq_length * 0.8)
targets = torch.rand(N, target_length, device=device)
targets_sizes = torch.full((N,), target_length, dtype=torch.int, device=device)
model = models.DeepSpeech(rnn_type=nn.LSTM, labels=labels, rnn_hidden_size=1024, nb_layers=5,
audio_conf=audio_conf, bidirectional=True)
if has_functorch:
from functorch.experimental import replace_all_batch_norm_modules_
replace_all_batch_norm_modules_(model)
model = model.to(device)
criterion = nn.CTCLoss()
params, names = extract_weights(model)
def forward(*new_params: Tensor) -> Tensor:
load_weights(model, names, new_params)
out, out_sizes = model(inputs, inputs_sizes)
out = out.transpose(0, 1) # For ctc loss
loss = criterion(out, targets, out_sizes, targets_sizes)
return loss
return forward, params
def get_transformer(device: torch.device) -> GetterReturnType:
# For most SOTA research, you would like to have embed to 720, nhead to 12, bsz to 64, tgt_len/src_len to 128.
N = 64
seq_length = 128
ntoken = 50
model = models.TransformerModel(ntoken=ntoken, ninp=720, nhead=12, nhid=2048, nlayers=2)
model.to(device)
if has_functorch:
# disable dropout for consistency checking
model.eval()
criterion = nn.NLLLoss()
params, names = extract_weights(model)
data = torch.rand(N, seq_length + 1, device=device).mul(ntoken).long()
inputs = data.narrow(1, 0, seq_length)
targets = data.narrow(1, 1, seq_length)
def forward(*new_params: Tensor) -> Tensor:
load_weights(model, names, new_params)
out = model(inputs)
loss = criterion(out.reshape(N * seq_length, ntoken), targets.reshape(N * seq_length))
return loss
return forward, params
def get_multiheadattn(device: torch.device) -> GetterReturnType:
# From https://github.com/pytorch/text/blob/master/test/data/test_modules.py#L10
embed_dim, nhead, tgt_len, src_len, bsz = 10, 5, 6, 10, 64
# Build torchtext MultiheadAttention module
in_proj = models.InProjContainer(torch.nn.Linear(embed_dim, embed_dim, bias=False),
torch.nn.Linear(embed_dim, embed_dim, bias=False),
torch.nn.Linear(embed_dim, embed_dim, bias=False))
model = models.MultiheadAttentionContainer(nhead, in_proj,
models.ScaledDotProduct(),
torch.nn.Linear(embed_dim, embed_dim, bias=False))
model.to(device)
params, names = extract_weights(model)
query = torch.rand((tgt_len, bsz, embed_dim), device=device)
key = value = torch.rand((src_len, bsz, embed_dim), device=device)
attn_mask_2D = torch.randint(0, 2, (tgt_len, src_len), device=device).to(torch.bool)
bias_k = bias_v = torch.rand((1, 1, embed_dim), device=device)
attn_mask = torch.stack([attn_mask_2D] * (bsz * nhead))
bias_k = bias_k.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1)
bias_v = bias_v.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1)
def forward(*new_params: Tensor) -> Tensor:
load_weights(model, names, new_params)
mha_output, attn_weights = model(query, key, value, attn_mask=attn_mask, bias_k=bias_k, bias_v=bias_v)
# Don't test any specific loss, just backprop ones for both outputs
loss = mha_output.sum() + attn_weights.sum()
return loss
return forward, params
| pytorch-master | benchmarks/functional_autograd_benchmark/audio_text_models.py |
import argparse
from collections import defaultdict
from utils import to_markdown_table, from_markdown_table
def main():
parser = argparse.ArgumentParser("Main script to compare results from the benchmarks")
parser.add_argument("--before", type=str, default="before.txt", help="Text file containing the times to use as base")
parser.add_argument("--after", type=str, default="after.txt", help="Text file containing the times to use as new version")
parser.add_argument("--output", type=str, default="", help="Text file where to write the output")
args = parser.parse_args()
with open(args.before, "r") as f:
content = f.read()
res_before = from_markdown_table(content)
with open(args.after, "r") as f:
content = f.read()
res_after = from_markdown_table(content)
diff = defaultdict(defaultdict)
for model in res_before:
for task in res_before[model]:
mean_before, var_before = res_before[model][task]
if task not in res_after[model]:
diff[model][task] = (None, mean_before, var_before, None, None)
else:
mean_after, var_after = res_after[model][task]
diff[model][task] = (mean_before / mean_after, mean_before, var_before, mean_after, var_after)
for model in res_after:
for task in res_after[model]:
if task not in res_before[model]:
mean_after, var_after = res_after[model][task]
diff[model][task] = (None, None, None, mean_after, var_after)
header = ("model", "task", "speedup", "mean (before)", "var (before)", "mean (after)", "var (after)")
out = to_markdown_table(diff, header=header)
print(out)
if args.output:
with open(args.output, "w") as f:
f.write(out)
if __name__ == "__main__":
main()
| pytorch-master | benchmarks/functional_autograd_benchmark/compare.py |
from benchmark_core import _register_test
from benchmark_pytorch import create_pytorch_op_test_case
def generate_pt_test(configs, pt_bench_op):
""" This function creates PyTorch op test based on the given operator
"""
_register_test(configs, pt_bench_op, create_pytorch_op_test_case, False)
def generate_pt_gradient_test(configs, pt_bench_op):
""" This function creates PyTorch op test based on the given operator
"""
_register_test(configs, pt_bench_op, create_pytorch_op_test_case, True)
def generate_pt_tests_from_op_list(ops_list, configs, pt_bench_op):
""" This function creates pt op tests one by one from a list of dictionaries.
    ops_list is a list of dictionaries. Each dictionary includes
the name of the operator and the math operation. Here is an example of using this API:
unary_ops_configs = op_bench.config_list(
attrs=[...],
attr_names=["M", "N"],
)
unary_ops_list = op_bench.op_list(
attr_names=["op_name", "op_func"],
attrs=[
["abs", torch.abs],
],
)
class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, op_name, op_func):
...
def forward(self):
...
op_bench.generate_pt_tests_from_op_list(unary_ops_list, unary_ops_configs, UnaryOpBenchmark)
"""
for op in ops_list:
_register_test(configs, pt_bench_op, create_pytorch_op_test_case, False, op)
def generate_pt_gradient_tests_from_op_list(ops_list, configs, pt_bench_op):
for op in ops_list:
_register_test(configs, pt_bench_op, create_pytorch_op_test_case, True, op)
| pytorch-master | benchmarks/operator_benchmark/benchmark_test_generator.py |
from caffe2.python import workspace
from caffe2.python import core
from caffe2.proto import caffe2_pb2
import benchmark_utils
from collections import namedtuple
from benchmark_test_generator import _register_test
from benchmark_core import TestConfig
"""Caffe2 performance microbenchmarks.
This module contains Caffe2-specific functionalities for performance
microbenchmarks.
"""
class Caffe2BenchmarkBase(object):
""" This is a base class used to create Caffe2 operator benchmark
"""
tensor_index = 0
test_index = 0
def __init__(self):
self.args = {}
self.user_provided_name = None
self._num_inputs_require_grads = 0
self._pass_count = 0
def _set_backward_test(self, is_backward):
pass
def _device_option(self, device):
""" This method is used to set device option.
"""
if device not in ['cuda', 'cpu']:
raise ValueError("Missing attrs in configs")
if 'cuda' in device:
self.dev = core.DeviceOption(caffe2_pb2.CUDA, 0)
else:
self.dev = core.DeviceOption(caffe2_pb2.CPU)
return self.dev
def tensor(self, shapes, dtype='float32', device='cpu'):
""" A wapper function to create C2 tensor filled with random data.
The name/label of the tensor is returned and it is available
throughout the benchmark execution phase.
Args:
            shapes: an int or a sequence of ints defining the shape of the tensor
dtype: use the dtypes from numpy
(https://docs.scipy.org/doc/numpy/user/basics.types.html)
Return:
C2 tensor of dtype
"""
return self.feed_tensor(benchmark_utils.numpy_random(dtype, *shapes), device)
def feed_tensor(self, tensor, device='cpu'):
""" Similar to tensor, but can supply any data compatible with FeedBlob
"""
blob_name = 'blob_' + str(Caffe2BenchmarkBase.tensor_index)
dev = self._device_option(device)
with core.DeviceScope(dev):
workspace.FeedBlob(blob_name, tensor)
Caffe2BenchmarkBase.tensor_index += 1
return blob_name
def module_name(self):
""" this is used to label the operator being benchmarked
"""
if self.user_provided_name:
return self.user_provided_name
return self.__class__.__name__
def set_module_name(self, name):
self.user_provided_name = name
def _value_to_str(self, value):
""" if value is bool, we will convert it to 0 and 1
"""
ret = value
if type(value) == bool:
ret = int(value)
return str(ret)
def test_name(self, name_type="long", **kargs):
""" this is a globally unique name which can be used to
label a specific test
"""
if name_type == "long":
test_name_str = []
for key in kargs:
value = kargs[key]
test_name_str.append(
key + self._value_to_str(value))
name = (self.module_name() + '_' +
'_'.join(test_name_str)).replace(" ", "")
elif name_type == "short":
# this is used to generate test name based on unique index
name = '_'.join([self.module_name(), 'test', str(Caffe2BenchmarkBase.test_index)])
Caffe2BenchmarkBase.test_index += 1
return name
def extract_inputs_tuple(self):
# add a dummy function here to match the interface of TorchBenchmarkBase
pass
class Caffe2OperatorTestCase(object):
""" This class includes all the information needed to benchmark an operator.
op_bench: it's a user-defined class (child of Caffe2BenchmarkBase)
        which includes inputs and the operator, etc.
test_config: a namedtuple includes test_name, input_shape, tag, run_backward.
When run_backward is false, the run_forward method will be executed, otherwise
run_backward method will be executed.
"""
def __init__(self, op_bench, test_config):
self.op_bench = op_bench
self.test_config = test_config
self.framework = "Caffe2"
def run_forward(self, num_runs, print_per_iter=False, cuda_sync=False):
""" Run the forward path of an operator in a loop
"""
with core.DeviceScope(self.op_bench.dev):
op = self.op_bench.forward()
if not workspace.RunOperatorMultiple(op, num_runs):
raise ValueError("Unable to run operator test case: {}".format(self.test_name))
def run_backward(self, num_runs, print_per_iter=False):
""" Run the backward path of an operator in a loop
"""
with core.DeviceScope(self.op_bench.dev):
op = self.op_bench.backward()
if not workspace.RunOperatorMultiple(op, num_runs):
raise ValueError("Unable to run operator gradient test case: {}".format(self.test_name))
def _print_per_iter(self):
pass
def create_caffe2_op_test_case(op_bench, test_config):
test_case = Caffe2OperatorTestCase(op_bench, test_config)
test_config = test_case.test_config
op = test_case.op_bench
func_name = "{}{}{}".format(op.module_name(), test_case.framework, str(test_config))
return (func_name, test_case)
OpMeta = namedtuple("OpMeta", "op_type num_inputs input_dims input_types \
output_dims num_outputs args device")
def generate_c2_test_from_ops(ops_metadata, bench_op, tags):
"""
This function is used to generate Caffe2 tests based on the metadata
of operators. The metadata includes seven fields which are 1) op_type:
the name of the operator. 2) num_inputs: the number of input blobs.
3) input_dims: a dictionary which includes the shapes of the input blobs.
4) input_types: a list which includes the types of input blobs. 5)
output_dims: a dictionary which includes the shapes of output blobs.
    6) num_outputs: the number of output blobs. 7) args: a dictionary which
    includes the args for the operator.
    Here is an example to show the metadata for the WeightedSum operator
op_type : WeightedSum
num_inputs: 4
input_dims: {'0': [256], '1': [1], '2': [256], '3': [1]}
input_types: ['float', 'float', 'float', 'float']
output_dims: {'0': [256]}
num_outputs: 4
args: {}
TODO(mingzhe0908): introduce device and add it to the benchmark name
"""
for op_metadata in ops_metadata:
tmp_attrs = OpMeta(op_metadata.op_type,
op_metadata.num_inputs,
op_metadata.input_dims,
op_metadata.input_types,
op_metadata.output_dims,
op_metadata.num_outputs,
op_metadata.args,
op_metadata.device)
test_attrs = tmp_attrs._asdict()
op = bench_op()
op.init(**test_attrs)
test_name = op.test_name("short")
input_config = "Shapes: {}, Type: {}, Args: {}".format(
op_metadata.input_dims,
op_metadata.input_types,
str(op_metadata.args))
test_config = TestConfig(test_name, input_config, tags, run_backward=False)
if op is not None:
create_caffe2_op_test_case(
op,
test_config)
def generate_c2_test(configs, c2_bench_op):
""" This function creates Caffe2 op test based on the given operator
"""
return _register_test(configs, c2_bench_op, create_caffe2_op_test_case,
False)
def generate_c2_gradient_test(configs, c2_bench_op):
""" This function creates Caffe2 op test based on the given operator
"""
return _register_test(configs, c2_bench_op, create_caffe2_op_test_case,
True)
| pytorch-master | benchmarks/operator_benchmark/benchmark_caffe2.py |
import time
import json
import torch
import benchmark_cpp_extension # noqa: F401
"""PyTorch performance microbenchmarks.
This module contains PyTorch-specific functionalities for performance
microbenchmarks.
"""
class TorchBenchmarkBase(torch.nn.Module):
""" This is a base class used to create Pytorch operator benchmark.
module_name is the name of the operator being benchmarked.
test_name is the name (it's created by concatenating all the
inputs) of a specific test
"""
def __init__(self):
super(TorchBenchmarkBase, self).__init__()
self.user_given_name = None
self._pass_count = 0
self._num_inputs_require_grads = 0
def _set_backward_test(self, is_backward):
self._is_backward = is_backward
def auto_set(self):
""" This is used to automatically set the require_grad for the backward patch.
It is implemented based on two counters. One counter to save the number of
times init has been called. The other counter to save the number of times
        this function itself has been called. The first time init is called,
this function counts how many inputs require gradient. In each of the
following init calls, this function will return only one true value.
Here is an example:
...
self.v1 = torch.rand(M, N, K, requires_grad=self.auto_set())
self.v2 = torch.rand(M, N, K, requires_grad=self.auto_set())
...
"""
if not self._is_backward:
return False
if self._pass_count == 0:
self._num_inputs_require_grads += 1
return True
else:
self._auto_set_counter += 1
return (self._pass_count == self._auto_set_counter)
def extract_inputs_tuple(self):
self.inputs_tuple = tuple(self.inputs.values())
@torch.jit.export
def get_inputs(self):
# Need to convert the inputs to tuple outside of JIT so that
# JIT can infer the size of the inputs.
return self.inputs_tuple
@torch.jit.export
def forward_impl(self):
# This is to supply the inputs to the forward function which
# will be called in both the eager and JIT mode of local runs
return self.forward(*self.get_inputs())
@torch.jit.export
def forward_consume(self, iters: int):
# _consume is used to avoid the dead-code-elimination optimization
for _ in range(iters):
torch.ops.operator_benchmark._consume(self.forward_impl())
def module_name(self):
""" this is used to label the operator being benchmarked
"""
if self.user_given_name:
return self.user_given_name
return self.__class__.__name__
def set_module_name(self, name):
self.user_given_name = name
def test_name(self, **kargs):
""" this is a globally unique name which can be used to
label a specific test
"""
# This is a list of attributes which will not be included
# in the test name.
skip_key_list = ['device']
test_name_str = []
for key in kargs:
value = kargs[key]
test_name_str.append(
('' if key in skip_key_list else key)
+ str(value if type(value) != bool else int(value)))
name = (self.module_name() + '_' +
'_'.join(test_name_str)).replace(" ", "")
return name
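# Minimal sketch of a user-defined benchmark (illustrative; the real examples live
# under pt/, and the init/forward signatures must match the configs passed to
# generate_pt_test):
#
#     class AddBenchmark(TorchBenchmarkBase):
#         def init(self, M, N, device):
#             self.inputs = {"input_one": torch.rand(M, N, device=device),
#                            "input_two": torch.rand(M, N, device=device)}
#             self.set_module_name("add")
#         def forward(self, input_one, input_two):
#             return input_one + input_two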
class PyTorchOperatorTestCase(object):
""" This class includes all the information needed to benchmark an operator.
op_bench: it's a user-defined class (child of TorchBenchmarkBase)
        which includes inputs and the operator, etc.
test_config: a namedtuple includes test_name, input_shape, tag, run_backward.
        When run_backward is false, the run_forward method will be executed.
        When run_backward is true, run_forward and _output_mean will be
        executed to generate the output. Then, run_backward will be executed.
"""
def __init__(self, op_bench, test_config):
self.test_config = test_config
self.op_bench = op_bench
self.place_holder_tensor = torch.ones(1)
self.framework = "PyTorch"
self.time_series = []
self._jit_forward_graph = None
def _generate_jit_forward_graph(self):
""" generate a graph for the forward function via scripting
"""
scripted_op_bench = torch.jit.script(self.op_bench)
return scripted_op_bench.forward_consume
def run_jit_forward(self, num_runs, print_per_iter=False, cuda_sync=False):
""" Run the forward path of an op with JIT mode
"""
if self._jit_forward_graph is None:
self._jit_forward_graph = self._generate_jit_forward_graph()
self._jit_forward_graph(num_runs)
def _print_per_iter(self):
# print last 50 values
length = min(len(self.time_series), 50)
for i in range(length):
print("PyTorchObserver " + json.dumps(
{
"type": self.test_config.test_name,
"metric": "latency",
"unit": "ms",
"value": str(self.time_series[length - i - 1]),
}
))
def run_forward(self, num_runs, print_per_iter, cuda_sync):
""" Run the forward path of an op with eager mode
"""
if print_per_iter:
for _ in range(num_runs):
start_time = time.time()
self.output = self.op_bench.forward_impl()
if cuda_sync:
torch.cuda.synchronize(torch.cuda.current_device())
end_time = time.time()
self.time_series.append((end_time - start_time) * 1e3)
else:
for _ in range(num_runs):
self.output = self.op_bench.forward_impl()
if cuda_sync:
torch.cuda.synchronize(torch.cuda.current_device())
def _output_mean(self):
""" TODO (mingzhe): it is not necessary to sum up everything by myself,
        torch.autograd.backward does take a gradient tensor. By default, it
is the same shape as your output tensor, with all 1s.
Mathematically, it is the same as if the output is summed together.
So we should be able to get ride of this method.
dummy function for gradient calculation
"""
self.mean = self.output.mean()
def run_backward(self, num_runs, print_per_iter=False):
""" Run the backward path of an op in many iterations
"""
# TODO: can we use JIT here to reduce python overhead?
for _ in range(num_runs):
self.mean.backward(retain_graph=True)
def create_pytorch_op_test_case(op_bench, test_config):
""" This method is used to generate est. func_name is a global unique
string. For PyTorch add operator with M=8, N=2, K=1, tag = long, here
are the values for the members in test_case:
op.module_name: add
framework: PyTorch
test_config: TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1',
tag='long', run_backward=False)
func_name: addPyTorchTestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1',
tag='long', run_backward=False)
"""
test_case = PyTorchOperatorTestCase(op_bench, test_config)
test_config = test_case.test_config
op = test_case.op_bench
func_name = "{}{}{}".format(op.module_name(), test_case.framework, str(test_config))
return (func_name, test_case)
| pytorch-master | benchmarks/operator_benchmark/benchmark_pytorch.py |
import operator_benchmark as op_bench
from pt import ( # noqa: F401
unary_test,
)
import benchmark_all_other_test # noqa: F401
import benchmark_all_quantized_test # noqa: F401
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/benchmark_all_test.py |
# TODO (mingzhe09088): get rid of noqa
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
| pytorch-master | benchmarks/operator_benchmark/operator_benchmark.py |
pytorch-master | benchmarks/operator_benchmark/__init__.py |
|
import operator_benchmark as op_bench
from pt import ( # noqa: F401
add_test, as_strided_test, batchnorm_test, binary_test, cat_test,
channel_shuffle_test, chunk_test, conv_test, diag_test, embeddingbag_test,
fill_test, gather_test, linear_test, matmul_test, nan_to_num_test, pool_test,
softmax_test, hardsigmoid_test, hardswish_test, layernorm_test,
groupnorm_test, interpolate_test, instancenorm_test, remainder_test,
split_test, sum_test, tensor_to_test
)
from pt import ( # noqa: F401
ao_sparsifier_test
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/benchmark_all_other_test.py |
import argparse
import torch
import benchmark_core
import benchmark_utils
"""Performance microbenchmarks's main binary.
This is the main function for running performance microbenchmark tests.
It also registers existing benchmark tests via Python module imports.
"""
parser = argparse.ArgumentParser(
description="Run microbenchmarks.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
def parse_args():
parser.add_argument(
'--tag_filter',
        help='tag_filter can be used to run the shapes which match the tag. (all is used to run all the shapes)',
default='short')
# This option is used to filter test cases to run.
parser.add_argument(
'--operators',
help='Filter tests based on comma-delimited list of operators to test',
default=None)
parser.add_argument(
'--operator_range',
        help='Filter tests based on operator_range (e.g. a-c or b,c-d)',
default=None)
parser.add_argument(
'--test_name',
help='Run tests that have the provided test_name',
default=None)
parser.add_argument(
'--list_ops',
help='List operators without running them',
action='store_true')
parser.add_argument(
'--list_tests',
help='List all test cases without running them',
action='store_true')
parser.add_argument(
"--iterations",
help="Repeat each operator for the number of iterations",
type=int
)
parser.add_argument(
"--num_runs",
help="Run each test for num_runs. Each run executes an operator for number of <--iterations>",
type=int,
default=1,
)
parser.add_argument(
"--min_time_per_test",
help="Set the minimum time (unit: seconds) to run each test",
type=int,
default=0,
)
parser.add_argument(
"--warmup_iterations",
help="Number of iterations to ignore before measuring performance",
default=100,
type=int
)
parser.add_argument(
"--omp_num_threads",
help="Number of OpenMP threads used in PyTorch/Caffe2 runtime",
default=None,
type=int
)
parser.add_argument(
"--mkl_num_threads",
help="Number of MKL threads used in PyTorch/Caffe2 runtime",
default=None,
type=int
)
parser.add_argument(
"--report_aibench",
type=benchmark_utils.str2bool,
nargs='?',
const=True,
default=False,
help="Print result when running on AIBench"
)
parser.add_argument(
"--use_jit",
type=benchmark_utils.str2bool,
nargs='?',
const=True,
default=False,
help="Run operators with PyTorch JIT mode"
)
parser.add_argument(
"--forward_only",
type=benchmark_utils.str2bool,
nargs='?',
const=True,
default=False,
help="Only run the forward path of operators"
)
parser.add_argument(
'--framework',
help='Comma-delimited list of frameworks to test (Caffe2, PyTorch)',
default="Caffe2,PyTorch")
parser.add_argument(
'--device',
help='Run tests on the provided architecture (cpu, cuda)',
default='None')
args, _ = parser.parse_known_args()
if args.omp_num_threads:
# benchmark_utils.set_omp_threads sets the env variable OMP_NUM_THREADS
# which doesn't have any impact as C2 init logic has already been called
# before setting the env var.
# In general, OMP_NUM_THREADS (and other OMP env variables) needs to be set
# before the program is started.
# From Chapter 4 in OMP standard: https://www.openmp.org/wp-content/uploads/openmp-4.5.pdf
# "Modifications to the environment variables after the program has started,
# even if modified by the program itself, are ignored by the OpenMP implementation"
benchmark_utils.set_omp_threads(args.omp_num_threads)
if benchmark_utils.is_pytorch_enabled(args.framework):
torch.set_num_threads(args.omp_num_threads)
if args.mkl_num_threads:
benchmark_utils.set_mkl_threads(args.mkl_num_threads)
return args
def main():
args = parse_args()
benchmark_core.BenchmarkRunner(args).run()
if __name__ == "__main__":
main()
| pytorch-master | benchmarks/operator_benchmark/benchmark_runner.py |
import argparse
import numpy as np
import itertools
import random
import os
import bisect
"""Performance microbenchmarks's utils.
This module contains utilities for writing microbenchmark tests.
"""
# Here are the reserved keywords in the benchmark suite
_reserved_keywords = {"probs", "total_samples", "tags"}
_supported_devices = {"cpu", "cuda"}
def shape_to_string(shape):
return ', '.join([str(x) for x in shape])
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def numpy_random(dtype, *shapes):
""" Return a random numpy tensor of the provided dtype.
Args:
        shapes: int or a sequence of ints defining the shapes of the tensor
dtype: use the dtypes from numpy
(https://docs.scipy.org/doc/numpy/user/basics.types.html)
Return:
numpy tensor of dtype
"""
# TODO: consider more complex/custom dynamic ranges for
# comprehensive test coverage.
return np.random.rand(*shapes).astype(dtype)
def set_omp_threads(num_threads):
existing_value = os.environ.get('OMP_NUM_THREADS', '')
if existing_value != '':
print("Overwriting existing OMP_NUM_THREADS value: {}; Setting it to {}.".format(
existing_value, num_threads))
os.environ["OMP_NUM_THREADS"] = str(num_threads)
def set_mkl_threads(num_threads):
existing_value = os.environ.get('MKL_NUM_THREADS', '')
if existing_value != '':
print("Overwriting existing MKL_NUM_THREADS value: {}; Setting it to {}.".format(
existing_value, num_threads))
os.environ["MKL_NUM_THREADS"] = str(num_threads)
def cross_product(*inputs):
"""
Return a list of cartesian product of input iterables.
For example, cross_product(A, B) returns ((x,y) for x in A for y in B).
"""
return (list(itertools.product(*inputs)))
def get_n_rand_nums(min_val, max_val, n):
random.seed((1 << 32) - 1)
return random.sample(range(min_val, max_val), n)
def generate_configs(**configs):
"""
Given configs from users, we want to generate different combinations of
those configs
For example, given M = ((1, 2), N = (4, 5)) and sample_func being cross_product,
we will generate (({'M': 1}, {'N' : 4}),
({'M': 1}, {'N' : 5}),
({'M': 2}, {'N' : 4}),
({'M': 2}, {'N' : 5}))
"""
    assert 'sample_func' in configs, "Missing sample_func to generate configs"
result = []
for key, values in configs.items():
if key == 'sample_func':
continue
tmp_result = []
for value in values:
tmp_result.append({key : value})
result.append(tmp_result)
results = configs['sample_func'](*result)
return results
def cross_product_configs(**configs):
"""
Given configs from users, we want to generate different combinations of
those configs
For example, given M = ((1, 2), N = (4, 5)),
we will generate (({'M': 1}, {'N' : 4}),
({'M': 1}, {'N' : 5}),
({'M': 2}, {'N' : 4}),
({'M': 2}, {'N' : 5}))
"""
_validate(configs)
configs_attrs_list = []
for key, values in configs.items():
tmp_results = [{key : value} for value in values]
configs_attrs_list.append(tmp_results)
# TODO(mingzhe0908) remove the conversion to list.
    # itertools.product produces an iterator that produces elements on the fly
# while converting to a list produces everything at the same time.
generated_configs = list(itertools.product(*configs_attrs_list))
return generated_configs
def _validate(configs):
""" Validate inputs from users."""
if 'device' in configs:
for v in configs['device']:
            assert(v in _supported_devices), "Device needs to be one of 'cpu' or 'cuda'."
def config_list(**configs):
""" Generate configs based on the list of input shapes.
    This function takes the input shapes specified in a list by the user. All
    other parameters are cross-producted first, and each generated config is
    then merged with the input shapes list.
    Reserved Args:
        attr_names(reserved): a list of names for input shapes.
        attrs(reserved): a list of values for each input shape.
        cross_product_configs: a dictionary of attributes which will be
            cross-producted with the input shapes.
        tags(reserved): a tag used to filter inputs.
Here is an example:
attrs = [
[1, 2],
[4, 5],
],
attr_names = ['M', 'N'],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
we will generate [[{'M': 1}, {'N' : 2}, {'device' : 'cpu'}],
[{'M': 1}, {'N' : 2}, {'device' : 'cuda'}],
[{'M': 4}, {'N' : 5}, {'device' : 'cpu'}],
[{'M': 4}, {'N' : 5}, {'device' : 'cuda'}]]
"""
generated_configs = []
reserved_names = ['attrs', 'attr_names', 'tags']
if any(attr not in configs for attr in reserved_names):
raise ValueError("Missing attrs in configs")
_validate(configs)
cross_configs = None
if 'cross_product_configs' in configs:
cross_configs = cross_product_configs(**configs['cross_product_configs'])
for inputs in configs['attrs']:
tmp_result = [{configs['attr_names'][i] : input_value}
for i, input_value in enumerate(inputs)]
# TODO(mingzhe0908):
# If multiple 'tags' were provided, do they get concat?
# If a config has both ['short', 'medium'], it should match
# both 'short' and 'medium' tag-filter?
tmp_result.append({'tags' : '_'.join(configs['tags'])})
if cross_configs:
generated_configs += [tmp_result + list(config) for config in cross_configs]
else:
generated_configs.append(tmp_result)
return generated_configs
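# A minimal call sketch for config_list (illustrative; mirrors the docstring example):
#
#   configs = config_list(
#       attr_names=['M', 'N'],
#       attrs=[[1, 2], [4, 5]],
#       cross_product_configs={'device': ['cpu', 'cuda']},
#       tags=['short'],
#   )
#
# Each [M, N] pair is merged with every cross-product entry, and a
# {'tags': 'short'} dict is appended to every generated config.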
def attr_probs(**probs):
""" return the inputs in a dictionary
"""
return probs
class RandomSample(object):
def __init__(self, configs):
self.saved_cum_distribution = {}
self.configs = configs
def _distribution_func(self, key, weights):
""" this is a cumulative distribution function used for random sampling inputs
"""
if key in self.saved_cum_distribution:
return self.saved_cum_distribution[key]
total = sum(weights)
result = []
cumsum = 0
for w in weights:
cumsum += w
result.append(cumsum / total)
self.saved_cum_distribution[key] = result
return result
def _random_sample(self, key, values, weights):
""" given values and weights, this function randomly sample values based their weights
"""
# TODO(mingzhe09088): cache the results to avoid recalculation overhead
assert len(values) == len(weights)
_distribution_func_vals = self._distribution_func(key, weights)
x = random.random()
idx = bisect.bisect(_distribution_func_vals, x)
assert idx <= len(values), "Wrong index value is returned"
        # Due to floating-point rounding, the last value in the cumulative sum can be
        # slightly smaller than 1, which can lead to idx == len(values).
if idx == len(values):
idx -= 1
return values[idx]
def get_one_set_of_inputs(self):
tmp_attr_list = []
for key, values in self.configs.items():
if key in _reserved_keywords:
continue
value = self._random_sample(key, values, self.configs["probs"][str(key)])
tmp_results = {key : value}
tmp_attr_list.append(tmp_results)
return (tmp_attr_list)
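# Worked example for the weighted sampling above (illustrative): for weights
# [0.7, 0.2, 0.1] the cumulative distribution is [0.7, 0.9, 1.0]; a uniform draw
# of x = 0.85 bisects to index 1, so the second value is picked, which happens
# roughly 20% of the time over many samples.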
def random_sample_configs(**configs):
"""
    This function randomly samples <total_samples> values from the given inputs based on
    their weights.
    Here is an example showing the expected inputs and outputs of this function:
M = [1, 2],
N = [4, 5],
K = [7, 8],
probs = attr_probs(
M = [0.7, 0.2],
N = [0.5, 0.2],
K = [0.6, 0.2],
),
total_samples=10,
this function will generate
[
[{'K': 7}, {'M': 1}, {'N': 4}],
[{'K': 7}, {'M': 2}, {'N': 5}],
[{'K': 8}, {'M': 2}, {'N': 4}],
...
]
Note:
        probs does not have to reflect the actual normalized probabilities; the
        implementation will normalize the weights. (Note that the current
        implementation requires probs to be provided; see the check below.)
TODO (mingzhe09088):
(1): a lambda that accepts or rejects a config as a sample. For example: for matmul
with M, N, and K, this function could get rid of (M * N * K > 1e8) to filter out
very slow benchmarks.
        (2): Make sure each sample is unique. If the number of samples is larger than the
        number of total combinations, just return the cross product. Otherwise, if the
        number of samples is close to the number of cross products, it is numerically
        safer to generate the list of configs you don't want and remove them.
"""
if "probs" not in configs:
raise ValueError("probs is missing. Consider adding probs or"
"using other config functions")
configs_attrs_list = []
randomsample = RandomSample(configs)
for i in range(configs["total_samples"]):
tmp_attr_list = randomsample.get_one_set_of_inputs()
tmp_attr_list.append({"tags" : '_'.join(configs["tags"])})
configs_attrs_list.append(tmp_attr_list)
return configs_attrs_list
def op_list(**configs):
"""Generate a list of ops organized in a specific format.
    It takes two parameters, "attr_names" and "attrs".
    attrs stores the names and functions of the operators.
Args:
configs: key-value pairs including the name and function of
operators. attrs and attr_names must be present in configs.
Return:
        a sequence of dictionaries which store the name and function
        of the ops in a specific format
Example:
attrs = [
["abs", torch.abs],
["abs_", torch.abs_],
]
attr_names = ["op_name", "op"].
With those two examples,
we will generate (({"op_name": "abs"}, {"op" : torch.abs}),
({"op_name": "abs_"}, {"op" : torch.abs_}))
"""
generated_configs = []
if "attrs" not in configs:
raise ValueError("Missing attrs in configs")
for inputs in configs["attrs"]:
tmp_result = {configs["attr_names"][i] : input_value
for i, input_value in enumerate(inputs)}
generated_configs.append(tmp_result)
return generated_configs
def is_caffe2_enabled(framework_arg):
return 'Caffe2' in framework_arg
def is_pytorch_enabled(framework_arg):
return 'PyTorch' in framework_arg
def get_operator_range(chars_range):
"""Generates the characters from chars_range inclusive."""
if chars_range == 'None' or chars_range is None:
return None
if all(item not in chars_range for item in [',', '-']):
raise ValueError("The correct format for operator_range is "
"<start>-<end>, or <point>, <start>-<end>")
ops_start_chars_set = set()
ranges = chars_range.split(',')
for item in ranges:
if len(item) == 1:
ops_start_chars_set.add(item.lower())
continue
start, end = item.split("-")
for c in range(ord(start), ord(end) + 1):
ops_start_chars_set.add(chr(c).lower())
return ops_start_chars_set
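# Example (illustrative): get_operator_range("a-c,t") returns {'a', 'b', 'c', 't'},
# so only operators whose module names start with one of those characters are kept.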
def process_arg_list(arg_list):
if arg_list == 'None':
return None
return [fr.strip() for fr in arg_list.split(',') if len(fr.strip()) > 0]
| pytorch-master | benchmarks/operator_benchmark/benchmark_utils.py |
import functools
import numpy as np
import timeit
import json
import torch
import copy
import ast
# needs to be imported after torch
import torch.utils.cpp_extension as cpp_extension # noqa: F401
import benchmark_utils
from collections import namedtuple
"""Performance microbenchmarks.
This module contains core functionalities for performance microbenchmark tests.
"""
"""
This is used to store configs of tests
An example input is:
TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1',
tag='long', run_backward=False)
"""
TestConfig = namedtuple("TestConfig", "test_name input_config tag run_backward")
BENCHMARK_TESTER = []
def _register_test(*test_metainfo):
""" save the metainfo needed to create a test. Currently test_metainfo
takes two different inputs:
        1) This input is used when adding a single op to the benchmark:
_register_test(configs, pt_bench_op, create_pytorch_op_test_case,
run_backward=True)
        2) This input is used when adding a list of ops to the benchmark:
_register_test(configs, pt_bench_op, create_pytorch_op_test_case,
run_backward=False,
op_name_function=op)
"""
BENCHMARK_TESTER.append(test_metainfo)
def _create_test(bench_op_obj, orig_test_attrs, tags, OperatorTestCase, run_backward, bwd_input):
""" Create tests with the benchmark backend.
Args:
bench_op_obj: an object which instantiated from a subclass of
Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor
creation and operator execution.
        test_attrs: a dictionary that includes the test configs.
        tags: an attribute in the test config used to filter inputs.
        OperatorTestCase: a named tuple used to save the metadata of a test.
        run_backward: a bool indicating whether to run the backward pass.
"""
test_attrs = copy.deepcopy(orig_test_attrs)
test_attrs = {k: str(v) for k, v in test_attrs.items()}
ascii_test_attrs = ast.literal_eval(json.dumps(test_attrs))
input_config = str(ascii_test_attrs)[1:-1].replace('\'', '')
if bwd_input:
# When auto_set is used, the test name needs to include input.
test_attrs.update({'bwd': bwd_input})
test_name = bench_op_obj.test_name(**test_attrs)
test_config = TestConfig(test_name, input_config, tags, run_backward)
return OperatorTestCase(bench_op_obj, test_config)
def _build_test(configs, bench_op, OperatorTestCase, run_backward, op_name_function=None):
"""Generate PyTorch/Caffe2 tests of operators with different inputs.
Args:
configs: a dictionary that has the input shapes
bench_op: a subclass of Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor
creation and operator execution
        OperatorTestCase: a named tuple used to save the metadata of a test
        run_backward: a bool indicating whether to run the backward pass
        op_name_function: a dictionary that includes an operator name and function
"""
for config in configs:
test_attrs = {}
tags = None
keep_config = True
for attr in config:
# tags is only used in our benchmark backend to filter tests and
# it will be removed from config which is then passed to the init function
            # an example of config and attr is:
# config: [{'M': 16}, {'N': 16}, {'K': 64}, {'tags': 'short'}]
# attr: {'tags': 'short'}
if "tags" in attr:
tags = attr["tags"]
continue
            # if 'cuda' is specified in the input shape but the testing machine doesn't
            # support it, we will skip this input
if 'cuda' in attr.values():
if not torch.cuda.is_available():
keep_config = False
break
test_attrs.update(attr)
if not keep_config:
continue
if tags is None:
raise ValueError("Missing tags in configs")
input_config = str(test_attrs)[1:-1].replace('\'', '')
op = bench_op()
assert op is not None, "Can't create test"
tensor_error_info = None
# op_name_function is a dictionary which has op_name and op_function.
# an example of op_name_function is:
# {'op_name' : 'abs', 'op_function' : torch.abs}
# op_function is concatenated with the input dict then passed to the init function
# op_name is passed to the set_module_name function
init_dict = copy.deepcopy(test_attrs)
if op_name_function is not None:
op_name = op_name_function['op_name']
init_dict.update({'op_func' : op_name_function['op_func']})
op.set_module_name(op_name)
op._set_backward_test(run_backward)
op.init(**init_dict)
op.extract_inputs_tuple()
if not run_backward:
for _, attr in vars(op).items():
if isinstance(attr, torch.nn.Module):
for param in attr.parameters():
param.requires_grad = False
input_name = None
# _num_inputs_require_grads is used to track the number of tensors
# which use auto_set().
if op._num_inputs_require_grads > 0:
input_name = 'all'
yield _create_test(op, test_attrs, tags, OperatorTestCase, run_backward, input_name)
# This for loop is only used when auto_set is used.
# _pass_count counts how many times init has been called.
# _auto_set_counter is reset after init is called.
for i in range(op._num_inputs_require_grads):
op._pass_count += 1
op._auto_set_counter = 0
# TODO(mingzhe09088): remove this deepcopy when we encounter
# performance issue.
new_op = copy.deepcopy(op)
new_op.init(**init_dict)
# Input name index will start from input1
input_name = i + 1
yield _create_test(new_op, test_attrs, tags, OperatorTestCase, run_backward, input_name)
class BenchmarkRunner(object):
"""BenchmarkRunner is responsible for benchmarking all the registered
benchmark test groups.
Attributes:
tag_filter (str): control the benchmarks which matches the tag.
        operator (str): only run benchmark test cases that contain
            this filter string in the test case's id.
test_name (str): only run benchmark test cases that matches this filter,
this is a case-sensitive substring match and it happens in
the _keep_test method.
"""
def __init__(self, args):
# TODO: consider time-bound constraints as well.
self.args = args
self.iters = 100
self.has_explicit_iteration_count = False
self.multiplier = 2
self.predefined_minimum_secs = 1
self.max_iters = 1e6
self.use_jit = args.use_jit
self.num_runs = args.num_runs
self.print_per_iter = False
self.operator_range = benchmark_utils.get_operator_range(args.operator_range)
# 100 is the default warmup iterations
if self.args.warmup_iterations == -1:
self.args.warmup_iterations = 100
if self.args.iterations and self.args.iterations != -1:
self.has_explicit_iteration_count = True
self.iters = self.args.iterations
# when a specific test is selected by a user, we don't need
# to match the tag anymore
if self.args.test_name is not None:
self.args.tag_filter = None
def _print_header(self):
DASH_LINE = '-' * 40
print("# {}\n"
"# PyTorch/Caffe2 Operator Micro-benchmarks\n"
"# {}\n"
"# Tag : {}\n".format(DASH_LINE, DASH_LINE, self.args.tag_filter))
if self.args.list_tests:
print("# List of tests:")
elif self.args.list_ops:
print("# List of Operators to run:")
self.printed_ops_list = set()
if self.args.operators:
print("# {}".format(self.args.operators))
def _print_perf_result(self, reported_run_time_us, test_case):
if self.args.report_aibench:
# Output for AIBench
# Print out per iteration execution time instead of avg time
return
test_name = '_'.join([test_case.framework, test_case.test_config.test_name])
for run in range(self.num_runs):
print("{}Observer ".format(test_case.framework) + json.dumps(
{
"type": test_name,
"metric": "latency",
"unit": "us",
"value": str(reported_run_time_us[run]),
}
))
else:
if test_case.framework == "PyTorch":
print("# Mode: {}".format("JIT" if self.use_jit else "Eager"))
print("# Name: {}\n"
"# Input: {}".format(
test_case.test_config.test_name,
test_case.test_config.input_config))
mode = "Backward" if test_case.test_config.run_backward else "Forward"
if self.num_runs > 1:
for run in range(self.num_runs):
print("Run: {}, {} Execution Time (us) : {:.3f}".format(
run,
mode, reported_run_time_us[run]))
print()
else:
print("{} Execution Time (us) : {:.3f}\n".format(
mode, reported_run_time_us[0]))
def _predict_num_iter_needed(self, i):
return (i * self.multiplier)
def _iteration_result_is_significant(self, iters, run_time_sec, curr_test_total_time, has_explicit_iteration_count):
""" This function decides whether the measured time can be reported based on the
following conditions: 1) the number of iterations is larger than the max_iters.
2) the execution time is larger than the predefined minimum_time
3) the execution time is larger than user defined minimum_time
"""
return ((iters > self.max_iters or
run_time_sec > self.predefined_minimum_secs or
has_explicit_iteration_count) and
curr_test_total_time > self.args.min_time_per_test)
def _launch_forward(self, test_case, iters, print_per_iter):
""" Use Python's timeit module to measure execution time (unit: second).
"""
cuda_sync = 'cuda' in test_case.test_config.test_name
func = test_case.run_forward
if self.use_jit:
func = test_case.run_jit_forward
forward_time = timeit.timeit(functools.partial(func, iters, print_per_iter, cuda_sync), number=1)
return forward_time
def _launch_backward(self, test_case, iters, print_per_iter=False):
""" This function runs forward path of an op to get an output. Then the backward path is executed
and the execution time is reported
"""
test_case.run_forward(num_runs=1, print_per_iter=False, cuda_sync=False)
if test_case.framework == "PyTorch":
test_case._output_mean()
backward_time = timeit.timeit(functools.partial(test_case.run_backward, iters,
print_per_iter),
number=1)
return backward_time
def _measure_time(self, launch_test, test_case, iters, print_per_iter):
"""
        This function executes the operator for <iters> iterations and then looks at the time.
        If it is not significant, the number of iterations is increased before rerunning.
The execution stops when the time becomes significant.
"""
curr_test_total_time = 0
time_trace = []
while True:
run_time_sec = launch_test(test_case, iters, print_per_iter)
curr_test_total_time += run_time_sec
# Analyze time after each run to decide if the result is stable
results_are_significant = self._iteration_result_is_significant(
iters, run_time_sec, curr_test_total_time, self.has_explicit_iteration_count)
report_run_time = 1e6 * run_time_sec / iters
time_trace.append(report_run_time)
# Print out the time spent in each epoch in ms
if self.args.report_aibench:
mode = "JIT" if self.use_jit else "Eager"
test_name = '_'.join([test_case.framework, test_case.test_config.test_name, mode])
print("PyTorchObserver " + json.dumps(
{
"type": test_name,
"metric": "latency",
"unit": "ms",
"value": str(report_run_time / 1e3),
}
))
if results_are_significant:
break
# Re-estimate the hopefully-sufficient
# iteration count, and run the benchmark again...
iters = self._predict_num_iter_needed(iters)
reported_run_time_us = np.percentile(np.array(time_trace), 50)
return reported_run_time_us
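    # Worked example of the adaptive loop above (illustrative): starting from the
    # default of 100 iterations, a run that is not yet significant doubles to 200,
    # then 400, and so on, until a single run exceeds predefined_minimum_secs, the
    # iteration count exceeds max_iters, or an explicit --iterations count was
    # given; the reported latency is the median per-iteration time in microseconds.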
def _check_keep(self, test_flag, cmd_flag):
return (cmd_flag is None or test_flag == cmd_flag)
def _check_operator_first_char(self, test_flag, cmd_flag):
if cmd_flag is None or test_flag[:1].lower() in cmd_flag:
return True
return False
def _check_keep_list(self, test_flag, cmd_flag_list):
if (cmd_flag_list is None or
any(test_flag == cmd_flag for cmd_flag in cmd_flag_list)):
return True
return False
def _keep_test(self, test_case):
# TODO: consider regex matching for test filtering.
# Currently, this is a sub-string matching.
op_test_config = test_case.test_config
if self.args.framework:
frameworks = benchmark_utils.process_arg_list(self.args.framework)
operators = benchmark_utils.process_arg_list(self.args.operators) if self.args.operators else None
# Filter framework, operator, test_name, tag, forward_only
if (self._check_keep(op_test_config.test_name, self.args.test_name) and
self._check_keep_list(test_case.op_bench.module_name(), operators) and
self._check_keep_list(test_case.framework, frameworks) and
self._check_operator_first_char(test_case.op_bench.module_name(), self.operator_range) and
(self.args.tag_filter == 'all' or
self._check_keep(op_test_config.tag, self.args.tag_filter)) and
(not self.args.forward_only or op_test_config.run_backward != self.args.forward_only) and
(self.args.device == 'None' or 'device' not in test_case.test_config.input_config or
self.args.device in op_test_config.test_name)):
return True
return False
def _print_test_case_info(self, test_case):
# Print out the test name and skip the real execution
if self.args.list_tests:
print("# {}".format(test_case.test_config.test_name))
return True
elif self.args.list_ops:
if self.args.operators is None:
op_name = test_case.op_bench.module_name()
if op_name not in self.printed_ops_list:
print("# {}".format(op_name))
self.printed_ops_list.add(op_name)
return True
return False
def run(self):
self._print_header()
for test_metainfo in BENCHMARK_TESTER:
for test in _build_test(*test_metainfo):
full_test_id, test_case = test
op_test_config = test_case.test_config
if self._print_test_case_info(test_case):
continue
if not self._keep_test(test_case):
continue
# To reduce variance, fix a numpy randseed to the test case,
# so that the randomly generated input tensors remain the
# same for each test case.
# The random seed is limited to 32-bit because of numpy
# requirement.
np.random.seed(seed=hash(full_test_id) & ((1 << 32) - 1))
print("# Benchmarking {}: {}".format(
test_case.framework,
test_case.op_bench.module_name()))
if op_test_config.run_backward:
launch_func = self._launch_backward
else:
launch_func = self._launch_forward
# Warmup
launch_func(test_case, self.args.warmup_iterations, print_per_iter=False)
# Actual Execution
reported_time = [self._measure_time(launch_func, test_case,
self.iters, self.print_per_iter)
for _ in range(self.num_runs)]
self._print_perf_result(reported_time, test_case)
| pytorch-master | benchmarks/operator_benchmark/benchmark_core.py |
import operator_benchmark as op_bench
from pt import ( # noqa: F401
qactivation_test,
qarithmetic_test,
qbatchnorm_test,
qcat_test,
qcomparators_test,
qconv_test,
qgroupnorm_test,
qinstancenorm_test,
qinterpolate_test,
qlayernorm_test,
qlinear_test,
qobserver_test,
qpool_test,
qrnn_test,
qtensor_method_test,
quantization_test,
qunary_test,
qembedding_pack_test,
qembeddingbag_test,
qatembedding_ops_test,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/benchmark_all_quantized_test.py |
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension
setup(name='benchmark_cpp_extension',
ext_modules=[CppExtension('benchmark_cpp_extension', ['extension.cpp'])],
cmdclass={'build_ext': BuildExtension})
| pytorch-master | benchmarks/operator_benchmark/pt_extension/setup.py |
import unittest
import benchmark_cpp_extension # noqa: F401
import torch
class TestConsumeOp(unittest.TestCase):
def test_jit_consume_op(self):
iters = 6
def foo(x):
for i in range(iters):
result = torch.ops.operator_benchmark._consume(torch.sum(x))
return result
r = torch.jit.trace(foo, (torch.rand(2, 2)))
graph = str(r.graph)
occurance = graph.count("aten::sum")
x = torch.rand(2, 2)
value = r(x)
self.assertEqual(value, torch.sum(x))
self.assertEqual(occurance, iters)
def test_jit_consume_op_for_list_input(self):
iters = 6
def foo(x):
for i in range(iters):
result = torch.ops.operator_benchmark._consume(torch.chunk(x, 2))
return result
r = torch.jit.trace(foo, torch.rand(2, 2))
graph = str(r.graph)
occurance = graph.count("aten::chunk")
x = torch.rand(2, 2)
value = r(x)
self.assertTrue(
all([torch.allclose(t1, t2) for t1, t2 in zip(value, torch.chunk(x, 2))])
)
self.assertEqual(occurance, iters)
| pytorch-master | benchmarks/operator_benchmark/pt_extension/cpp_extension_test.py |
pytorch-master | benchmarks/operator_benchmark/common/__init__.py |
|
import numpy as np
import torch
import time
"""Microbenchmarks for Tensor repeat operator. Supports PyTorch."""
input_shapes = (
(4, 4, 1),
(16, 1, 32),
(64, 64, 1, 1),
(8, 256, 128),
(1, 64, 128, 32),
(512, 512),
)
repeats = (
(1, 1, 1, 64),
(1, 4, 1, 2),
(1, 2, 2, 15),
(1, 1, 3, 2),
(128, 1, 8, 1),
(1, 1, 2, 16),
)
NUM_WARMUP_ITERS = 5
NUM_BENCHMARK_ITERS = 10
DTYPE_TO_BYTES = {'float' : 4}
def generate_data_for_repeat():
input_tensors = [torch.randn(*input_shape) for input_shape in input_shapes]
total_num_elements = 0
for input_tensor, repeat in zip(input_tensors, repeats):
total_num_elements += input_tensor.numel()
total_num_elements += input_tensor.numel() * np.prod(repeat)
return input_tensors, (total_num_elements * DTYPE_TO_BYTES['float'])
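# Illustrative byte count: the input of shape (4, 4, 1) repeated (1, 1, 1, 64)
# reads 16 elements and writes 16 * 64 = 1024 elements, contributing
# (16 + 1024) * 4 bytes to the total used for the bandwidth estimate below.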
input_tensors, total_bytes = generate_data_for_repeat()
BYTES_TO_MB = (1. / 1000. / 1000.)
def pt_repeat(input_tensor, repeat):
return input_tensor.repeat(repeat)
def pt_repeat_n_times(niters):
for _ in range(niters):
for input_tensor, repeat in zip(input_tensors, repeats):
pt_repeat(input_tensor, repeat)
if __name__ == "__main__":
# Warm up runs.
pt_repeat_n_times(NUM_WARMUP_ITERS)
s = time.time()
pt_repeat_n_times(NUM_BENCHMARK_ITERS)
total_time_s = (time.time() - s)
total_time_per_iter_s = total_time_s / NUM_BENCHMARK_ITERS
achieved_bandwidth = (total_bytes * BYTES_TO_MB) / total_time_per_iter_s
print("Time:{} Achieved Bandwidth:{} MB/s".format(total_time_per_iter_s, achieved_bandwidth))
| pytorch-master | benchmarks/operator_benchmark/common/repeat_benchmark.py |
import operator_benchmark as op_bench
import torch
intraop_bench_configs = op_bench.config_list(
attrs=[
[8, 16],
],
attr_names=["M", "N"],
tags=["short"],
)
@torch.jit.script
def torch_sumall(a, iterations):
# type: (Tensor, int)
result = 0.0
for _ in range(iterations):
result += float(torch.sum(a))
a[0][0] += 0.01
return result
class TorchSumBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N):
self.input_one = torch.rand(M, N)
self.set_module_name("sum")
# This is a very temporary method and will be removed soon, so
# don't use this method in your benchmark
# TODO(mingzhe): use one forward method for both JIT and Eager
def jit_forward(self, iters):
return torch_sumall(self.input_one, iters)
op_bench.generate_pt_test(intraop_bench_configs, TorchSumBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/common/tests/jit_forward_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
add_short_configs = op_bench.config_list(
attr_names=['M', 'N', 'K'],
attrs=[
[8, 16, 32],
[16, 16, 64],
[64, 64, 128],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
'dtype': [torch.float, torch.float64],
},
tags=['short'],
)
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype):
self.input_one = torch.rand(M, N, K, device=device, dtype=dtype, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, dtype=dtype)
self.set_module_name('add')
def forward(self):
return torch.add(self.input_one, self.input_two)
op_bench.generate_pt_test(add_short_configs, AddBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/common/tests/pt_configs_list_test.py |
import operator_benchmark as op_bench
import torch
configs = op_bench.random_sample_configs(
M=[1, 2, 3, 4, 5, 6],
N=[7, 8, 9, 10, 11, 12],
K=[13, 14, 15, 16, 17, 18],
# probs saves the weights of each value
probs=op_bench.attr_probs(
M=[0.5, 0.2, 0.1, 0.05, 0.03, 0.1],
N=[0.1, 0.3, 0.4, 0.02, 0.03, 0.04],
K=[0.03, 0.6, 0.04, 0.02, 0.03, 0.01],
),
# this is the number of returned inputs
total_samples=10,
tags=["short"],
)
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K)
self.input_two = torch.rand(M, N, K)
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
op_bench.generate_pt_test(configs, AddBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/common/tests/random_sample_test.py |
import operator_benchmark as op_bench
import torch
add_configs = op_bench.cross_product_configs(
M=[8],
N=[8],
K=[8],
device=["cuda", "cpu"],
tags=["short"]
)
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.input_one = torch.rand(M, N, K, device=device, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, requires_grad=True)
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
op_bench.generate_pt_test(add_configs, AddBenchmark)
op_bench.generate_pt_gradient_test(add_configs, AddBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/common/tests/pt_cpu_gpu_forward_backward_test.py |
import operator_benchmark as op_bench
from caffe2.python import core
add_configs = op_bench.cross_product_configs(
M=[8],
N=[8],
K=[8],
tags=["short"],
device=["cuda", "cpu"]
)
class AddBenchmark(op_bench.Caffe2BenchmarkBase):
def init(self, M, N, K, device):
self.set_module_name("add")
self.input_one = self.tensor([M, N, K], device=device)
self.input_two = self.tensor([M, N, K], device=device)
self.input_one_grad = self.tensor([M, N, K], device=device)
self.input_two_grad = self.tensor([M, N, K], device=device)
self.output = self.tensor([M, N, K], device=device)
def forward(self):
op = core.CreateOperator(
"Add", [self.input_one, self.input_two], self.output, **self.args
)
return op
def backward(self):
grad_op = core.CreateOperator(
"AddGradient", [self.output, self.input_one, self.input_two],
[self.input_one_grad, self.input_two_grad], **self.args
)
return grad_op
op_bench.generate_c2_test(add_configs, AddBenchmark)
op_bench.generate_c2_gradient_test(add_configs, AddBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/common/tests/c2_cpu_gpu_forward_backward_test.py |
import operator_benchmark as op_bench
import torch
# Configs for pointwise unary ops
unary_ops_configs = op_bench.config_list(
attrs=[
[128, 128],
],
attr_names=["M", "N"],
tags=["short"]
)
unary_ops_list = op_bench.op_list(
attr_names=["op_name", "op_func"],
attrs=[
["abs", torch.abs],
["acos", torch.acos],
],
)
class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, op_func):
self.input_one = torch.rand(M, N)
self.op_func = op_func
def forward(self):
return self.op_func(self.input_one)
op_bench.generate_pt_tests_from_op_list(unary_ops_list, unary_ops_configs, UnaryOpBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/common/tests/add_ops_list_test.py |
import operator_benchmark as op_bench
import torch
add_configs = op_bench.cross_product_configs(
M=[8, 1],
N=[8, 2],
K=[8, 4],
tags=["short"]
)
# This benchmark uses the auto_set to automatically set requires_grad
# for both inputs. The test name can also be used for filtering.
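# Illustratively, with auto_set() the benchmark core generates one gradient test
# where all inputs require grad (test names ending in "bwdall") plus one variant
# per auto_set input ("bwd1", "bwd2", ...), by re-running init with a different
# input flagged each time (see _build_test in benchmark_core.py).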
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K, requires_grad=self.auto_set())
self.input_two = torch.rand(M, N, K, requires_grad=self.auto_set())
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
op_bench.generate_pt_test(add_configs, AddBenchmark)
op_bench.generate_pt_gradient_test(add_configs, AddBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/common/tests/pt_backward_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
dims=(
(1, 8, 16),
(8, 8, 16),
(32, 8, 16),
(64, 128, 56, 56),
),
dtype=(torch.qint8,),
tags=["short"],
)
class QLayerNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, dtype):
X = (torch.rand(*dims) - 0.5) * 256
scale = 1.0
zero_point = 0
self.qX = torch.quantize_per_tensor(
X, scale=scale, zero_point=zero_point, dtype=dtype)
self.inputs = {
"qX": self.qX,
"weight": torch.rand(*self.qX.size()[1:], dtype=torch.float),
"bias": torch.rand(*self.qX.size()[1:], dtype=torch.float),
"eps": 1e-5,
"Y_scale": 0.1,
"Y_zero_point": 0
}
def forward(self, qX, weight, bias, eps: float, Y_scale: float, Y_zero_point: int):
return torch.ops.quantized.layer_norm(
qX, qX.size()[1:], weight=weight, bias=bias,
eps=eps, output_scale=Y_scale,
output_zero_point=Y_zero_point)
op_bench.generate_pt_test(layernorm_configs_short, QLayerNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qlayernorm_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for diag operator"""
# Configs for PT diag operator
diag_configs_short = op_bench.config_list(
attr_names=['dim', 'M', 'N', 'diagonal', 'out'],
attrs=[
[1, 64, 64, 0, True],
[2, 128, 128, -10, False],
[1, 256, 256, 20, True],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=['short'],
)
class DiagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dim, M, N, diagonal, out, device):
self.inputs = {
"input": torch.rand(M, N, device=device) if dim == 2 else torch.rand(M, device=device),
"diagonal": diagonal,
"out": out,
"out_tensor": torch.tensor((),)
}
self.set_module_name('diag')
def forward(self, input, diagonal: int, out: bool, out_tensor):
if out:
return torch.diag(input, diagonal=diagonal, out=out_tensor)
else:
return torch.diag(input, diagonal=diagonal)
op_bench.generate_pt_test(diag_configs_short, DiagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/diag_test.py |
import operator_benchmark as op_bench
import torch
import numpy
from pt import configs
"""Embedding and EmbeddingBag Operator Benchmark"""
class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
self.embedding = torch.nn.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
include_last_offset=include_last_offset,
sparse=sparse).to(device=device)
numpy.random.seed((1 << 32) - 1)
offsets = torch.LongTensor([offset], device=device)
input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()
self.inputs = {
"input": input,
"offset": torch.cat((offsets, torch.tensor([input.size(0)], dtype=torch.long)), 0)
}
self.set_module_name('embeddingbag')
def forward(self, input, offset):
return self.embedding(input, offset)
op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
class EmbeddingBenchmark(op_bench.TorchBenchmarkBase):
def init(self, num_embeddings, embedding_dim, input_size, device):
self.embedding = torch.nn.Embedding(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim).to(device=device)
numpy.random.seed((1 << 32) - 1)
input = torch.tensor(numpy.random.randint(0, num_embeddings, input_size), device=device).long()
self.inputs = {"input": input}
self.set_module_name('embedding')
def forward(self, input):
return self.embedding(input)
op_bench.generate_pt_test(configs.embedding_short_configs, EmbeddingBenchmark)
op_bench.generate_pt_gradient_test(configs.embedding_short_configs, EmbeddingBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/embeddingbag_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized groupnorm operator."""
groupnorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
),
num_groups=(2, 4),
dtype=(torch.qint8,),
tags=["short"],
)
class QGroupNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, num_groups, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
zero_point = 0
self.inputs = {
"qX": torch.quantize_per_tensor(
X, scale=scale, zero_point=zero_point, dtype=dtype),
"num_groups": num_groups,
"weight": torch.rand(num_channels, dtype=torch.float),
"bias": torch.rand(num_channels, dtype=torch.float),
"eps": 1e-5,
"Y_scale": 0.1,
"Y_zero_point": 0
}
def forward(self, qX, num_groups: int, weight, bias, eps: float, Y_scale: float, Y_zero_point: int):
return torch.ops.quantized.group_norm(
qX, num_groups, weight=weight, bias=bias,
eps=eps, output_scale=Y_scale,
output_zero_point=Y_zero_point)
op_bench.generate_pt_test(groupnorm_configs_short, QGroupNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qgroupnorm_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
from typing import List
"""Microbenchmarks for quantized Cat operator"""
# Configs for PT Cat operator
qcat_configs_short = op_bench.config_list(
attr_names=['M', 'N', 'K', 'L', 'dim'],
attrs=[
[256, 512, 1, 2, 0],
[512, 512, 2, 1, 1],
],
cross_product_configs={
'contig': ('all', 'one', 'none'),
'dtype': (torch.quint8, torch.qint8, torch.qint32),
},
tags=['short'],
)
qcat_configs_long = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
K=[1, 2],
L=[5, 7],
dim=[0, 1, 2],
contig=['all', 'one', 'none'],
dtype=[torch.quint8],
tags=['long']
)
class QCatBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, L, dim, contig, dtype):
f_input = (torch.rand(M, N, K) - 0.5) * 256
self.qf = nnq.QFunctional()
scale = 1.0
zero_point = 0
self.qf.scale = scale
self.qf.zero_point = zero_point
assert(contig in ('none', 'one', 'all'))
q_input = torch.quantize_per_tensor(f_input, scale, zero_point, dtype)
permute_dims = tuple(range(q_input.ndim - 1, -1, -1))
q_input_non_contig = q_input.permute(permute_dims).contiguous()
q_input_non_contig = q_input_non_contig.permute(permute_dims)
if contig == 'all':
self.input = (q_input, q_input)
elif contig == 'one':
self.input = (q_input, q_input_non_contig)
elif contig == 'none':
self.input = (q_input_non_contig, q_input_non_contig)
self.inputs = {
"input": self.input,
"dim": dim
}
self.set_module_name('qcat')
def forward(self, input: List[torch.Tensor], dim: int):
return self.qf.cat(input, dim=dim)
op_bench.generate_pt_test(qcat_configs_short + qcat_configs_long,
QCatBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qcat_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
import torch.ao.quantization as tq
import torch.nn as nn
"""Microbenchmarks for general quantization operations."""
# mode is used to show the direction of the benchmark:
# if 'Q', benchmark quantization, else dequantization
quantize_configs_short_dict = {
'attr_names': ['C', 'M', 'N', 'dtype', 'mode'],
'attrs': [
[3, 512, 512, torch.quint8, 'Q'],
[3, 512, 512, torch.quint8, 'D'],
],
'tags': ['short'],
}
quantize_configs_long_dict = {
'C': [3, 5, 8], # this is reused for per-channel: avoid single channel test
'M': [256, 1024],
'N': [256, 1024],
'dtype': [torch.quint8, torch.qint8, torch.qint32],
'mode': ['D', 'Q'],
'tags': ['long'],
}
quantize_per_tensor_configs_short = op_bench.config_list(
**quantize_configs_short_dict
)
quantize_per_tensor_configs_long = op_bench.cross_product_configs(
**quantize_configs_long_dict
)
class QuantizePerTensorBenchmark(op_bench.TorchBenchmarkBase):
r"""Benchmarks both quantization and dequantization."""
def init(self, C, M, N, dtype, mode):
assert(mode in ('Q', 'D'))
self.input = torch.rand(C, M, N)
self.dtype = dtype
self.op = nnq.Quantize(scale=1.0, zero_point=0, dtype=dtype)
self.set_module_name('QuantizePerTensor')
if mode == 'D':
self.input = self.op(self.input)
self.op = nnq.DeQuantize()
self.set_module_name('DequantizePerTensor')
self.inputs = {
"input": self.input
}
def forward(self, input):
return self.op(input)
op_bench.generate_pt_test(
quantize_per_tensor_configs_short + quantize_per_tensor_configs_long,
QuantizePerTensorBenchmark)
# === Per Channel quantization ===
quantize_per_channel_configs_short = op_bench.config_list(
cross_product_configs={
'axis': (0,)
},
**quantize_configs_short_dict
)
quantize_per_channel_configs_long = op_bench.cross_product_configs(
axis=(0, 1, 2),
**quantize_configs_long_dict
)
class QuantizePerChannelBenchmark(op_bench.TorchBenchmarkBase):
r"""Benchmarks both quantization and dequantization."""
def init(self, C, M, N, dtype, axis, mode):
assert(mode in ('Q', 'D'))
self.input = torch.rand(C, M, N)
self.op = torch.quantize_per_channel
channel_len = (C, M, N)[axis]
self.kwargs = {
'scales': torch.tensor([1.0] * channel_len),
'zero_points': torch.tensor([0] * channel_len),
'dtype': dtype,
'axis': axis
}
self.set_module_name('QuantizePerChannel')
if mode == 'D':
self.input = self.op(self.input, **self.kwargs)
def dequant(input, scales, zero_points, axis: int, dtype: int):
return input.dequantize()
self.op = dequant
self.set_module_name('DequantizePerChannel')
self.inputs = {
"input": self.input,
'scales': torch.tensor([1.0] * channel_len),
'zero_points': torch.tensor([0] * channel_len),
'axis': axis,
'dtype': dtype
}
def forward(self, input, scales, zero_points, axis: int, dtype: int):
return self.op(input, scales=scales, zero_points=zero_points, axis=axis, dtype=dtype)
op_bench.generate_pt_test(
quantize_per_channel_configs_short + quantize_per_channel_configs_long,
QuantizePerChannelBenchmark)
# === Fake Quantization ===
# Generated benchmarks names start with 'learnable_kernel' or 'original_kernel',
# for ex. 'original_kernel_nbits8_cpu_N1_C1_H256_W256_zero_point_dtypetorch.int32_bwdall'
fake_quantize_configs_short_dict = {
'attr_names': ['N', 'C', 'H', 'W', 'zero_point_dtype'],
'attrs': [
[1, 3, 512, 512, torch.int32],
],
'tags': ['short']
}
fake_quantize_configs_long_dict = {
'N': [1],
'C': [1, 3, 8, 32],
'H': [256, 1024],
'W': [256, 1024],
'zero_point_dtype': [torch.int32],
'tags': ['long']
}
fake_quantize_configs_short = op_bench.config_list(
cross_product_configs={
'device': ('cpu', 'cuda'),
},
**fake_quantize_configs_short_dict
)
fake_quantize_configs_long = op_bench.cross_product_configs(
device=('cpu', 'cuda'),
**fake_quantize_configs_long_dict
)
class FakeQuantizeBenchmark(op_bench.TorchBenchmarkBase):
r"""Benchmarks fake quantization with default parameters."""
def init(self, N, C, H, W, zero_point_dtype, device):
self.inputs = {
"input": torch.rand(N, C, H, W).to(device)
}
self.op = tq.FakeQuantize().to(device)
self.set_module_name('FakeQuantize')
def forward(self, input):
return self.op(input)
op_bench.generate_pt_test(
fake_quantize_configs_short + fake_quantize_configs_long,
FakeQuantizeBenchmark)
# op_type is used to describe the type of operator used in benchmarking:
# learnable_kernel represents the c++ kernel that can backpropagate on
# scale and zero point.
# original_kernel represents the original fake quantize c++ kernel.
def fakeQuantizePerTensorLearnableKernel(
input, scale, zero_point,
quant_min: int, quant_max: int
):
return torch._fake_quantize_learnable_per_tensor_affine(input, scale, zero_point, quant_min, quant_max)
def fakeQuantizePerTensorOriginalKernel(
input, scale, zero_point,
quant_min: int, quant_max: int
):
return torch.fake_quantize_per_tensor_affine(input, 1.0, 0, quant_min, quant_max)
fake_quantize_per_tensor_ops = op_bench.op_list(
attrs=(
('learnable_kernel', fakeQuantizePerTensorLearnableKernel),
('original_kernel', fakeQuantizePerTensorOriginalKernel)
),
attr_names=('op_name', 'op_func'),
)
fake_quantize_operator_configs_short = op_bench.config_list(
cross_product_configs={
'nbits': (4, 8),
'device': ('cpu', 'cuda'),
},
**fake_quantize_configs_short_dict
)
fake_quantize_operator_configs_long = op_bench.cross_product_configs(
nbits=(4, 8),
device=('cpu', 'cuda'),
**fake_quantize_configs_long_dict
)
# TODO(future PR) Combine config for floating point zero_point with other configs, once it is
# fully supported in all fakeQuant operators and devices for
# https://github.com/pytorch/pytorch/issues/61866.
fake_quantize_configs_long_dict_float_zero_point = fake_quantize_configs_long_dict.copy()
fake_quantize_configs_long_dict_float_zero_point['zero_point_dtype'] = [torch.float32, torch.half]
fake_quantize_operator_configs_long_float_zero_point = op_bench.cross_product_configs(
nbits=(8,),
device=('cpu', 'cuda'),
**fake_quantize_configs_long_dict_float_zero_point
)
class FakeQuantizePerTensorBaseOpBenchmark(op_bench.TorchBenchmarkBase):
r"""Benchmarks 3 different fake quantize per tensor operators."""
def init(self, N, C, H, W, zero_point_dtype, nbits, device, op_func):
self.quant_min = 0
self.quant_max = 2 ** nbits - 1
self.quant_range = 2 ** nbits
self.input = nn.Parameter(torch.rand(N, C, H, W, dtype=torch.float, device=device), requires_grad=self.auto_set())
self.scale = nn.Parameter(torch.tensor([1.]).to(device), requires_grad=self.auto_set())
if op_func.__name__ == 'fakeQuantizePerChannelOriginalKernel':
self.zero_point = nn.Parameter(torch.tensor([0.]).to(device).to(zero_point_dtype), requires_grad=self.auto_set())
else:
self.zero_point = nn.Parameter(torch.tensor([0.]).to(device), requires_grad=self.auto_set())
self.inputs = {
"input": self.input,
"scale": self.scale,
"zero_point": self.zero_point,
"quant_min": self.quant_min,
"quant_max": self.quant_max,
}
self.op_func = op_func
def forward(
self, input, scale, zero_point,
quant_min: int, quant_max: int
):
return self.op_func(input, scale, zero_point, quant_min, quant_max)
op_bench.generate_pt_tests_from_op_list(
fake_quantize_per_tensor_ops,
fake_quantize_operator_configs_short + fake_quantize_operator_configs_long,
FakeQuantizePerTensorBaseOpBenchmark
)
op_bench.generate_pt_gradient_tests_from_op_list(
fake_quantize_per_tensor_ops,
fake_quantize_operator_configs_short + fake_quantize_operator_configs_long,
FakeQuantizePerTensorBaseOpBenchmark
)
def fakeQuantizePerChannelLearnableKernel(
input, scale, zero_point, axis: int,
quant_min: int, quant_max: int
):
return torch._fake_quantize_learnable_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max)
def fakeQuantizePerChannelOriginalKernel(
input, scale, zero_point, axis: int,
quant_min: int, quant_max: int
):
return torch.fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max)
fake_quantize_per_channel_ops = op_bench.op_list(
attrs=(
('learnable_kernel', fakeQuantizePerChannelLearnableKernel),
('original_kernel', fakeQuantizePerChannelOriginalKernel)
),
attr_names=('op_name', 'op_func'),
)
fake_quantize_per_channel_float_zero_point_ops = op_bench.op_list(
attrs=(
('original_kernel', fakeQuantizePerChannelOriginalKernel),
),
attr_names=('op_name', 'op_func'),
)
class FakeQuantizePerChannelOpBenchmark(op_bench.TorchBenchmarkBase):
r"""Benchmarks 3 different fake quantize per channel operators."""
def init(self, N, C, H, W, zero_point_dtype, nbits, device, op_func):
self.quant_min = 0
self.quant_max = 2 ** nbits - 1
self.quant_range = 2 ** nbits
# Axis is chosen with respect to the number of channels: C.
self.axis = 1
self.input = nn.Parameter(torch.rand(N, C, H, W, dtype=torch.float, device=device, requires_grad=self.auto_set()))
if op_func.__name__ == 'fakeQuantizePerChannelOriginalKernel':
self.scale = torch.ones(C, device=device, dtype=torch.float32, requires_grad=False)
self.zero_point = torch.zeros(C, device=device, dtype=zero_point_dtype, requires_grad=False)
else:
self.scale = nn.Parameter(torch.ones(C, device=device, dtype=torch.float32), requires_grad=self.auto_set())
self.zero_point = nn.Parameter(torch.zeros(C, device=device, dtype=torch.float32), requires_grad=self.auto_set())
self.inputs = {
"input": self.input,
"scale": self.scale,
"zero_point": self.zero_point,
"axis": self.axis,
"quant_min": self.quant_min,
"quant_max": self.quant_max,
}
self.op_func = op_func
def forward(
self, input, scale, zero_point,
axis: int, quant_min: int, quant_max: int
):
return self.op_func(input, scale, zero_point, axis, quant_min, quant_max)
op_bench.generate_pt_tests_from_op_list(
fake_quantize_per_channel_ops,
fake_quantize_operator_configs_short + fake_quantize_operator_configs_long,
FakeQuantizePerChannelOpBenchmark
)
op_bench.generate_pt_tests_from_op_list(
fake_quantize_per_channel_float_zero_point_ops,
fake_quantize_operator_configs_long_float_zero_point,
FakeQuantizePerChannelOpBenchmark
)
op_bench.generate_pt_gradient_tests_from_op_list(
fake_quantize_per_channel_ops,
fake_quantize_operator_configs_short + fake_quantize_operator_configs_long,
FakeQuantizePerChannelOpBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/quantization_test.py |
import operator_benchmark as op_bench
import torch
from torch import nn
from torch.ao import sparsity
"""Microbenchmarks for sparsifier."""
sparse_configs_short = op_bench.config_list(
attr_names=["M", "SL", "SBS", "ZPB"],
attrs=[
[(32, 16), 0.3, (4, 1), 2],
[(32, 16), 0.6, (1, 4), 4],
[(17, 23), 0.9, (1, 1), 1]
],
tags=("short",)
)
sparse_configs_long = op_bench.cross_product_configs(
M=((128, 128), (255, 324)), # Mask shape
SL=(0.0, 1.0, 0.3, 0.6, 0.9, 0.99), # Sparsity level
SBS=((1, 4), (1, 8), (4, 1), (8, 1)), # Sparse block shape
ZPB=(0, 1, 2, 3, 4, None), # Zeros per block
tags=("long",)
)
class WeightNormSparsifierBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, SL, SBS, ZPB):
weight = torch.ones(M)
model = nn.Module()
model.register_buffer("weight", weight)
sparse_config = [{"tensor_fqn": "weight"}]
self.sparsifier = sparsity.WeightNormSparsifier(
sparsity_level=SL,
sparse_block_shape=SBS,
zeros_per_block=ZPB,
)
self.sparsifier.prepare(model, config=sparse_config)
self.inputs = {} # All benchmarks need inputs :)
self.set_module_name("weight_norm_sparsifier_step")
def forward(self):
self.sparsifier.step()
all_tests = sparse_configs_short + sparse_configs_long
op_bench.generate_pt_test(all_tests, WeightNormSparsifierBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/ao_sparsifier_test.py |
import operator_benchmark as op_bench
import torch
from typing import List
"""Microbenchmarks for as_strided operator"""
# Configs for PT as_strided operator
as_strided_configs_short = op_bench.config_list(
attr_names=["M", "N", "size", "stride", "storage_offset"],
attrs=[
[8, 8, (2, 2), (1, 1), 0],
[256, 256, (32, 32), (1, 1), 0],
[512, 512, (64, 64), (2, 2), 1],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"],
)
as_strided_configs_long = op_bench.cross_product_configs(
M=[512],
N=[1024],
size=[(16, 16), (128, 128)],
stride=[(1, 1)],
storage_offset=[0, 1],
device=['cpu', 'cuda'],
tags=['long']
)
class As_stridedBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, size, stride, storage_offset, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device),
"size": size,
"stride": stride,
"storage_offset": storage_offset
}
self.set_module_name('as_strided')
def forward(
self, input_one, size: List[int], stride: List[int], storage_offset: int
):
return torch.as_strided(
input_one, size, stride, storage_offset)
op_bench.generate_pt_test(as_strided_configs_short + as_strided_configs_long,
As_stridedBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/as_strided_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for remainder operators."""
# Benchmark ops performance with broadcast
remainder_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['fmod', torch.fmod],
['remainder', torch.remainder],
],
)
remainder_short_configs = op_bench.config_list(
attr_names=['M', 'N', 'K'],
attrs=[
[1, 1, 1],
[64, 64, 64],
[64, 64, 128],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
'dtype' : [torch.int32, torch.float, torch.double],
},
tags=['short'],
)
remainder_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
K=[256, 512],
device=['cpu', 'cuda'],
dtype=[torch.int32, torch.float, torch.double],
tags=['long']
)
class RemainderOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype, op_func):
self.dividend = torch.rand(M, N, K, device=device)
self.dividend = (self.dividend * 1000 - 500).to(dtype=dtype)
self.divisor = torch.rand(M, N, K, device=device)
# +1 so we don't divide by zero
self.divisor = (self.divisor * 40 + 1).to(dtype=dtype)
self.inputs = {
"dividend": self.dividend,
"divisor": self.divisor
}
self.op_func = op_func
def forward(self, dividend, divisor):
return self.op_func(dividend, divisor)
op_bench.generate_pt_tests_from_op_list(remainder_ops_list,
remainder_short_configs + remainder_long_configs,
RemainderOpBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/remainder_test.py |
import operator_benchmark as op_bench
"""
Configs shared by multiple benchmarks
"""
def remove_cuda(config_list):
cuda_config = {'device': 'cuda'}
return [config for config in config_list if cuda_config not in config]
# Configs for conv-1d ops
conv_1d_configs_short = op_bench.config_list(
attr_names=[
'IC', 'OC', 'kernel', 'stride', 'N', 'L'
],
attrs=[
[128, 256, 3, 1, 1, 64],
[256, 256, 3, 2, 4, 64],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=['short']
)
conv_1d_configs_long = op_bench.cross_product_configs(
IC=[128, 512],
OC=[128, 512],
kernel=[3],
stride=[1, 2],
N=[8],
L=[128],
device=['cpu', 'cuda'],
tags=["long"]
)
# Configs for Conv2d and ConvTranspose2d
conv_2d_configs_short = op_bench.config_list(
attr_names=[
'IC', 'OC', 'kernel', 'stride', 'N', 'H', 'W', 'G', 'pad',
],
attrs=[
[256, 256, 3, 1, 1, 16, 16, 1, 0],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=['short']
)
conv_2d_configs_long = op_bench.cross_product_configs(
IC=[128, 256],
OC=[128, 256],
kernel=[3],
stride=[1, 2],
N=[4],
H=[32],
W=[32],
G=[1],
pad=[0],
device=['cpu', 'cuda'],
tags=["long"]
)
# Configs for Conv3d and ConvTranspose3d
conv_3d_configs_short = op_bench.config_list(
attr_names=[
'IC', 'OC', 'kernel', 'stride', 'N', 'D', 'H', 'W'
],
attrs=[
[64, 64, 3, 1, 8, 4, 16, 16],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=['short']
)
linear_configs_short = op_bench.config_list(
attr_names=["N", "IN", "OUT"],
attrs=[
[1, 1, 1],
[4, 256, 128],
[16, 512, 256],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"]
)
linear_configs_long = op_bench.cross_product_configs(
N=[32, 64],
IN=[128, 512],
OUT=[64, 128],
device=['cpu', 'cuda'],
tags=["long"]
)
embeddingbag_short_configs = op_bench.cross_product_configs(
embeddingbags=[10, 120, 1000, 2300],
dim=[64],
mode=['sum'],
input_size=[8, 16, 64],
offset=[0],
sparse=[True, False],
include_last_offset=[True, False],
device=['cpu'],
tags=['short']
)
embedding_short_configs = op_bench.cross_product_configs(
num_embeddings=[10, 120, 1000, 2300],
embedding_dim=[64],
input_size=[8, 16, 64],
device=['cpu'],
tags=['short']
)
| pytorch-master | benchmarks/operator_benchmark/pt/configs.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized batchnorm operator."""
batchnorm_configs_short = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 256, 3136],
],
cross_product_configs={
'device': ['cpu'],
'dtype': (torch.qint8,),
},
tags=["short"]
)
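# QBatchNormBenchmark is a template: subclasses override _init() to build the
# rank-3 or rank-4 input tensor and forward() to call the matching
# torch.ops.quantized.batch_norm variant.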
class QBatchNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype):
self._init(M, N, K, device)
x_scale = 0.1
x_zero_point = 0
self.inputs = {
"q_input_one": torch.quantize_per_tensor(
self.input_one, scale=x_scale, zero_point=x_zero_point, dtype=dtype),
"mean": torch.rand(N),
"var": torch.rand(N),
"weight": torch.rand(N),
"bias": torch.rand(N),
"eps": 1e-5,
"Y_scale": 0.1,
"Y_zero_point": 0
}
def _init(self, M, N, K, device):
pass
def forward(self):
pass
class QBatchNorm1dBenchmark(QBatchNormBenchmark):
def _init(self, M, N, K, device):
self.set_module_name("QBatchNorm1d")
self.input_one = torch.rand(M, N, K, device=device, requires_grad=self.auto_set())
def forward(
self,
q_input_one,
weight,
bias,
mean,
var,
eps: float,
Y_scale: float,
Y_zero_point: int
):
return torch.ops.quantized.batch_norm1d(
q_input_one, weight, bias, mean, var, eps,
Y_scale, Y_zero_point)
class QBatchNorm2dBenchmark(QBatchNormBenchmark):
def _init(self, M, N, K, device):
self.set_module_name("QBatchNorm2d")
# Note: quantized implementation requires rank 4, which is why we
# add a 1 as the last dimension
self.input_one = torch.rand(M, N, K, 1, device=device, requires_grad=self.auto_set())
def forward(
self,
q_input_one,
weight,
bias,
mean,
var,
eps: float,
Y_scale: float,
Y_zero_point: int
):
return torch.ops.quantized.batch_norm2d(
q_input_one, weight, bias, mean, var, eps,
Y_scale, Y_zero_point)
op_bench.generate_pt_test(batchnorm_configs_short, QBatchNorm1dBenchmark)
op_bench.generate_pt_test(batchnorm_configs_short, QBatchNorm2dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qbatchnorm_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT add operator
add_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
K=[256, 512],
device=['cpu', 'cuda'],
tags=["long"]
)
add_short_configs = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 1, 1],
[64, 64, 64],
[64, 64, 128],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"],
)
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.inputs = {
"input_one": torch.rand(M, N, K, device=device, requires_grad=self.auto_set()),
"input_two": torch.rand(M, N, K, device=device, requires_grad=self.auto_set())
}
self.set_module_name("add")
def forward(self, input_one, input_two):
return torch.add(input_one, input_two)
# The generated test names based on add_short_configs will be in the following pattern:
# add_M8_N16_K32_devicecpu
# add_M8_N16_K32_devicecpu_bwdall
# add_M8_N16_K32_devicecpu_bwd1
# add_M8_N16_K32_devicecpu_bwd2
# ...
# Those names can be used to filter tests.
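# As an illustration, a subset of these tests can usually be run directly, e.g.:
#   python -m pt.add_test --tag_filter short
# (the module path and --tag_filter flag are assumptions based on the standard
# operator_benchmark runner; check benchmark_runner's --help for the exact options.)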
op_bench.generate_pt_test(add_long_configs + add_short_configs, AddBenchmark)
op_bench.generate_pt_gradient_test(add_long_configs + add_short_configs, AddBenchmark)
"""Mircobenchmark for addmm operator."""
class AddmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.inputs = {
"input_one": torch.rand(M, K, device=device, requires_grad=self.auto_set()),
"mat1": torch.rand(M, N, device=device, requires_grad=self.auto_set()),
"mat2": torch.rand(N, K, device=device, requires_grad=self.auto_set())
}
self.set_module_name("addmm")
def forward(self, input_one, mat1, mat2):
return torch.addmm(input_one, mat1, mat2)
op_bench.generate_pt_test(add_long_configs + add_short_configs, AddmmBenchmark)
op_bench.generate_pt_gradient_test(add_long_configs + add_short_configs, AddmmBenchmark)
"""Mircobenchmark for addr operator."""
class AddrBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device, dtype):
self.inputs = {
"input_one": torch.rand((M, N), device=device, requires_grad=self.auto_set(), dtype=dtype),
"vec1": torch.rand((M,), device=device, requires_grad=self.auto_set(), dtype=dtype),
"vec2": torch.rand((N,), device=device, requires_grad=self.auto_set(), dtype=dtype)
}
self.set_module_name("addr")
def forward(self, input_one, vec1, vec2):
return torch.addr(input_one, vec1, vec2)
addr_configs = op_bench.cross_product_configs(
M=[8, 256],
N=[256, 16],
device=['cpu', 'cuda'],
dtype=[torch.double, torch.half],
tags=["addr"],
)
op_bench.generate_pt_test(addr_configs, AddrBenchmark)
op_bench.generate_pt_gradient_test(addr_configs, AddrBenchmark)
"""Mircobenchmark for addbmm operator."""
class AddbmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device):
self.inputs = {
"input_one": torch.rand((M, N), device=device, requires_grad=self.auto_set()),
"batch1": torch.rand((B, M, K), device=device, requires_grad=self.auto_set()),
"batch2": torch.rand((B, K, N,), device=device, requires_grad=self.auto_set())
}
self.set_module_name("addbmm")
def forward(self, input_one, batch1, batch2):
return torch.addbmm(input_one, batch1, batch2)
addbmm_configs = op_bench.cross_product_configs(
B=[2, 100],
M=[8, 256],
N=[256, 16],
K=[15, 16],
device=['cpu', 'cuda'],
tags=["addbmm"],
)
op_bench.generate_pt_test(addbmm_configs, AddbmmBenchmark)
op_bench.generate_pt_gradient_test(addbmm_configs, AddbmmBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/add_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for point-wise unary operator."""
# Configs for pointwise unary ops
unary_ops_configs_short = op_bench.config_list(
attr_names=['M', 'N'],
attrs=[
[512, 512],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=['short']
)
unary_ops_configs_long = op_bench.cross_product_configs(
M=[256, 1024],
N=[256, 1024],
device=['cpu', 'cuda'],
tags=['long']
)
class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device, op_func):
self.inputs = {
"input": torch.rand(M, N, device=device)
}
self.op_func = op_func
def forward(self, input):
return self.op_func(input)
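# The helpers below wrap Tensor methods (in-place random fills like bernoulli_
# and dtype casts like half/long) so they can be benchmarked through the same
# op_func interface as the torch.* functions listed further down.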
def bernoulli_(input):
return input.bernoulli_()
def cauchy_(input):
return input.cauchy_()
def digamma_(input):
return input.digamma_()
def exponential_(input):
return input.exponential_()
def normal_(input):
return input.normal_()
def random_(input):
return input.random_()
def sign_(input):
return input.sign_()
def uniform_(input):
return input.uniform_()
def half_(input):
return input.half()
def long_(input):
return input.long()
unary_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['abs', torch.abs],
['abs_', torch.abs_],
['acos', torch.acos],
['acos_', torch.acos_],
['argsort', torch.argsort],
['asin', torch.asin],
['asin_', torch.asin_],
['atan', torch.atan],
['atan_', torch.atan_],
['ceil', torch.ceil],
['ceil_', torch.ceil_],
['clone', torch.clone],
['cos', torch.cos],
['cos_', torch.cos_],
['cosh', torch.cosh],
['digamma', torch.digamma],
['erf', torch.erf],
['erf_', torch.erf_],
['erfc', torch.erfc],
['erfc_', torch.erfc_],
['erfinv', torch.erfinv],
['exp', torch.exp],
['exp_', torch.exp_],
['expm1', torch.expm1],
['expm1_', torch.expm1_],
['floor', torch.floor],
['floor_', torch.floor_],
['frac', torch.frac],
['frac_', torch.frac_],
['hardshrink', torch.hardshrink],
['lgamma', torch.lgamma],
['log', torch.log],
['log10', torch.log10],
['log10_', torch.log10_],
['log1p', torch.log1p],
['log1p_', torch.log1p_],
['log2', torch.log2],
['log2_', torch.log2_],
['log_', torch.log_],
['logit', torch.logit],
['logit_', torch.logit_],
['neg', torch.neg],
['neg_', torch.neg_],
['reciprocal', torch.reciprocal],
['reciprocal_', torch.reciprocal_],
['relu', torch.relu],
['relu_', torch.relu_],
['round', torch.round],
['round_', torch.round_],
['rsqrt', torch.rsqrt],
['rsqrt_', torch.rsqrt_],
['sigmoid', torch.sigmoid],
['sigmoid_', torch.sigmoid_],
['sign', torch.sign],
['sgn', torch.sgn],
['sin', torch.sin],
['sin_', torch.sin_],
['sinh', torch.sinh],
['sqrt', torch.sqrt],
['sqrt_', torch.sqrt_],
['square', torch.square],
['square_', torch.square_],
['tan', torch.tan],
['tan_', torch.tan_],
['tanh', torch.tanh],
['tanh_', torch.tanh_],
['trunc', torch.trunc],
['trunc_', torch.trunc_],
['unique', torch.functional._return_output],
['zero_', torch.zero_],
['bernoulli_', bernoulli_],
['cauchy_', cauchy_],
['digamma_', digamma_],
['exponential_', exponential_],
['normal_', normal_],
['random_', random_],
['sign_', sign_],
['uniform_', uniform_],
['half', half_],
['long', long_],
],
)
op_bench.generate_pt_tests_from_op_list(unary_ops_list,
unary_ops_configs_short + unary_ops_configs_long,
UnaryOpBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/unary_test.py |
import operator_benchmark as op_bench
import torch
import torch.ao.quantization.observer as obs
qobserver_short_configs_dict = {
'attr_names': ('C', 'M', 'N', 'dtype', 'device'),
'attrs': (
(3, 512, 512, torch.quint8, 'cpu'),
(3, 512, 512, torch.quint8, 'cuda'),
),
'tags': ('short',),
}
q_hist_observer_short_configs_dict = {
'attr_names': ('C', 'M', 'N', 'dtype', 'device'),
'attrs': (
(3, 512, 512, torch.quint8, 'cpu'),
),
'tags': ('short',),
}
qobserver_long_configs_dict = {
'C': (32, 64),
'M': (256, 1024),
'N': (256, 1024),
'device': ('cpu', 'cuda'),
'dtype': (torch.quint8,), # dtype doesn't change the timing, keep the same
'tags': ('long',),
}
q_hist_observer_long_configs_dict = {
'C': (1, 3, 8),
'M': (256, 1024),
'N': (256, 1024),
'device': ('cpu',),
'dtype': (torch.quint8,), # dtype doesn't change the timing, keep the same
'tags': ('long',),
}
qobserver_per_tensor_configs_short = op_bench.config_list(
cross_product_configs={
'qscheme': (torch.per_tensor_affine, torch.per_tensor_symmetric)
},
**qobserver_short_configs_dict,
)
qobserver_per_tensor_configs_long = op_bench.cross_product_configs(
qscheme=(torch.per_tensor_affine, torch.per_tensor_symmetric),
**qobserver_long_configs_dict,
)
qobserver_per_channel_configs_short = op_bench.config_list(
cross_product_configs={
'qscheme': (torch.per_channel_affine, torch.per_channel_symmetric)
},
**qobserver_short_configs_dict,
)
qobserver_per_channel_configs_long = op_bench.cross_product_configs(
qscheme=(torch.per_channel_affine, torch.per_channel_symmetric),
**qobserver_long_configs_dict,
)
q_hist_observer_per_tensor_configs_short = op_bench.config_list(
cross_product_configs={
'qscheme': (torch.per_tensor_affine, torch.per_tensor_symmetric)
},
**q_hist_observer_short_configs_dict,
)
q_hist_observer_per_tensor_configs_long = op_bench.cross_product_configs(
qscheme=(torch.per_tensor_affine, torch.per_tensor_symmetric),
**q_hist_observer_long_configs_dict,
)
qobserver_per_tensor_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['MinMaxObserver', obs.MinMaxObserver],
['MovingAverageMinMaxObserver', obs.MovingAverageMinMaxObserver],
]
)
qobserver_per_channel_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['PerChannelMinMaxObserver', obs.PerChannelMinMaxObserver],
['MovingAveragePerChannelMinMaxObserver',
obs.MovingAveragePerChannelMinMaxObserver],
]
)
q_hist_observer_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['HistogramObserver', obs.HistogramObserver],
['HistogramObserverCalculateQparams', obs.HistogramObserver],
]
)
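# QObserverBenchmark times observing a fresh input plus calculate_qparams();
# QObserverBenchmarkCalculateQparams records the input once in init() and times
# only the calculate_qparams() call.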
class QObserverBenchmark(op_bench.TorchBenchmarkBase):
def init(self, C, M, N, dtype, qscheme, op_func, device):
self.inputs = {
"f_input": torch.rand(C, M, N, device=device)
}
self.op_func = op_func(dtype=dtype, qscheme=qscheme).to(device)
def forward(self, f_input):
self.op_func(f_input)
return self.op_func.calculate_qparams()
class QObserverBenchmarkCalculateQparams(op_bench.TorchBenchmarkBase):
def init(self, C, M, N, dtype, qscheme, op_func, device):
self.f_input = torch.rand(C, M, N, device=device)
self.q_observer = op_func(dtype=dtype, qscheme=qscheme).to(device)
self.q_observer(self.f_input)
self.inputs = {}
def forward(self):
return self.q_observer.calculate_qparams()
op_bench.generate_pt_tests_from_op_list(
qobserver_per_tensor_list,
qobserver_per_tensor_configs_short + qobserver_per_tensor_configs_long,
QObserverBenchmark)
op_bench.generate_pt_tests_from_op_list(
qobserver_per_channel_list,
qobserver_per_channel_configs_short + qobserver_per_channel_configs_long,
QObserverBenchmark)
op_bench.generate_pt_tests_from_op_list(
q_hist_observer_list,
q_hist_observer_per_tensor_configs_short + q_hist_observer_per_tensor_configs_long,
QObserverBenchmarkCalculateQparams)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qobserver_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
),
tags=["short"],
)
class InstanceNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims):
num_channels = dims[1]
self.inputs = {
"input": (torch.rand(*dims) - 0.5) * 256,
"weight": torch.rand(num_channels, dtype=torch.float),
"bias": torch.rand(num_channels, dtype=torch.float),
"eps": 1e-5
}
def forward(self, input, weight, bias, eps: float):
return F.instance_norm(
input, weight=weight, bias=bias, eps=eps)
op_bench.generate_pt_test(instancenorm_configs_short, InstanceNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/instancenorm_test.py |
import operator_benchmark as op_bench
import torch
from torch.testing._internal.common_device_type import get_all_device_types
"""Microbenchmark for Fill_ operator."""
fill_short_configs = op_bench.config_list(
attr_names=["N"],
attrs=[
[1],
[1024],
[2048],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
'dtype': [torch.int32],
},
tags=["short"],
)
fill_long_configs = op_bench.cross_product_configs(
N=[10, 1000],
device=get_all_device_types(),
dtype=[torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32,
torch.int64, torch.half, torch.float, torch.double],
tags=["long"]
)
class Fill_Benchmark(op_bench.TorchBenchmarkBase):
def init(self, N, device, dtype):
self.inputs = {
"input_one": torch.zeros(N, device=device).type(dtype)
}
self.set_module_name("fill_")
def forward(self, input_one):
return input_one.fill_(10)
op_bench.generate_pt_test(fill_short_configs + fill_long_configs,
Fill_Benchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/fill_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
from pt import configs
"""
Microbenchmarks for qConv operators.
"""
class QConv1dBenchmark(op_bench.TorchBenchmarkBase):
# def init(self, N, IC, OC, L, G, kernel, stride, pad):
def init(self, IC, OC, kernel, stride, N, L, device):
G = 1
pad = 0
self.scale = 1.0 / 255
self.zero_point = 0
X = torch.randn(N, IC, L, dtype=torch.float32)
qX = torch.quantize_per_tensor(
X, scale=self.scale, zero_point=self.zero_point, dtype=torch.quint8
)
# Create the float weight and quantize it per-tensor
W = torch.randn(OC, IC // G, kernel, dtype=torch.float32)
self.qW = torch.quantize_per_tensor(W, scale=self.scale, zero_point=0, dtype=torch.qint8)
self.inputs = {
"input": qX
}
self.qconv1d = nnq.Conv1d(IC, OC, kernel, stride=stride, padding=pad, groups=G)
self.qconv1d.set_weight_bias(self.qW, None)
self.qconv1d.scale = torch.tensor(self.scale, dtype=torch.double)
self.qconv1d.zero_point = torch.tensor(self.zero_point, dtype=torch.int)
self.set_module_name("QConv1d")
def forward(self, input):
return self.qconv1d(input)
class QConv2dBenchmark(op_bench.TorchBenchmarkBase):
# def init(self, N, IC, OC, H, W, G, kernel, stride, pad):
def init(self, IC, OC, kernel, stride, N, H, W, G, pad, device):
# super(QConv2dBenchmark, self).init(N, IC, OC, (H, W), G, (kernel, kernel), stride, pad)
self.scale = 1.0 / 255
self.zero_point = 0
X = torch.randn(N, IC, H, W, dtype=torch.float32)
qX = torch.quantize_per_tensor(
X, scale=self.scale, zero_point=self.zero_point, dtype=torch.quint8
)
# Create the float weight and quantize it per-tensor
W = torch.randn(OC, IC // G, kernel, kernel, dtype=torch.float32)
self.qW = torch.quantize_per_tensor(W, scale=self.scale, zero_point=0, dtype=torch.qint8)
self.inputs = {
"input": qX
}
self.qconv2d = nnq.Conv2d(IC, OC, kernel, stride=stride, padding=pad, groups=G)
self.qconv2d.set_weight_bias(self.qW, None)
self.qconv2d.scale = torch.tensor(self.scale, dtype=torch.double)
self.qconv2d.zero_point = torch.tensor(self.zero_point, dtype=torch.int)
self.set_module_name("QConv2d")
def forward(self, input):
return self.qconv2d(input)
op_bench.generate_pt_test(configs.remove_cuda(configs.conv_1d_configs_short + configs.conv_1d_configs_long), QConv1dBenchmark)
op_bench.generate_pt_test(configs.remove_cuda(configs.conv_2d_configs_short + configs.conv_2d_configs_long), QConv2dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qconv_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized unary operators (point-wise and reduction)."""
# Configs for pointwise and reduction unary ops
qunary_ops_configs_short = op_bench.config_list(
attr_names=['M', 'N'],
attrs=[
[512, 512],
],
cross_product_configs={
'dtype': [torch.quint8],
},
tags=['short']
)
qunary_ops_configs_long = op_bench.cross_product_configs(
M=[256, 1024],
N=[256, 1024],
dtype=[torch.quint8, torch.qint8, torch.qint32],
tags=['long']
)
class QUnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, op_func):
f_input = torch.rand(M, N)
scale = 1.0
zero_point = 0
self.inputs = {
"q_input": torch.quantize_per_tensor(f_input, scale=scale,
zero_point=zero_point,
dtype=dtype)
}
self.op_func = op_func
def forward(self, q_input):
return self.op_func(q_input)
# TODO: Uncomment the ops whenever they are implemented for quantized tensor.
qunary_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
# ['q_abs', torch.abs],
# ['q_abs_', torch.abs_],
# ['q_acos', torch.acos],
# ['q_acos_', torch.acos_],
['q_argsort', torch.argsort],
# ['q_asin', torch.asin],
# ['q_asin_', torch.asin_],
# ['q_atan', torch.atan],
# ['q_atan_', torch.atan_],
# ['q_ceil', torch.ceil],
# ['q_ceil_', torch.ceil_],
['q_clone', torch.clone],
# ['q_cos', torch.cos],
# ['q_cos_', torch.cos_],
# ['q_cosh', torch.cosh],
# ['q_digamma', torch.digamma],
# ['q_erf', torch.erf],
# ['q_erf_', torch.erf_],
# ['q_erfc', torch.erfc],
# ['q_erfc_', torch.erfc_],
# ['q_erfinv', torch.erfinv],
# ['q_exp', torch.exp],
# ['q_exp_', torch.exp_],
# ['q_expm1', torch.expm1],
# ['q_expm1_', torch.expm1_],
# ['q_floor', torch.floor],
# ['q_floor_', torch.floor_],
# ['q_frac', torch.frac],
# ['q_frac_', torch.frac_],
# ['q_hardshrink', torch.hardshrink],
# ['q_lgamma', torch.lgamma],
# ['q_log', torch.log],
# ['q_log10', torch.log10],
# ['q_log10_', torch.log10_],
# ['q_log1p', torch.log1p],
# ['q_log1p_', torch.log1p_],
# ['q_log2', torch.log2],
# ['q_log2_', torch.log2_],
# ['q_log_', torch.log_],
['q_mean', torch.mean],
# ['q_neg', torch.neg],
# ['q_neg_', torch.neg_],
# ['q_reciprocal', torch.reciprocal],
# ['q_reciprocal_', torch.reciprocal_],
['q_relu', torch.relu],
['q_relu_', torch.relu_],
# ['q_round', torch.round],
# ['q_round_', torch.round_],
# ['q_rsqrt', torch.rsqrt],
# ['q_rsqrt_', torch.rsqrt_],
# ['q_sigmoid', torch.sigmoid],
# ['q_sigmoid_', torch.sigmoid_],
# ['q_sign', torch.sign],
# ['q_sin', torch.sin],
# ['q_sin_', torch.sin_],
# ['q_sinh', torch.sinh],
['q_sort', torch.sort],
# ['q_sqrt', torch.sqrt],
# ['q_sqrt_', torch.sqrt_],
# ['q_tan', torch.tan],
# ['q_tan_', torch.tan_],
# ['q_tanh', torch.tanh],
# ['q_tanh_', torch.tanh_],
# ['q_trunc', torch.trunc],
# ['q_trunc_', torch.trunc_],
# ['q_unique', torch.unique],
# ['q_zero_', torch.zero_],
# ['q_bernoulli_', lambda t: t.bernoulli_()],
# ['q_cauchy_', lambda t: t.cauchy_()],
# ['q_digamma_', lambda t: t.digamma_()],
# ['q_exponential_', lambda t: t.exponential_()],
# ['q_normal_', lambda t: t.normal_()],
# ['q_random_', lambda t: t.random_()],
# ['q_sign_', lambda t: t.sign_()],
# ['q_uniform_', lambda t: t.uniform_()],
# ['q_half', lambda t: t.half()],
# ['q_long', lambda t: t.long()],
],
)
op_bench.generate_pt_tests_from_op_list(qunary_ops_list,
qunary_ops_configs_short + qunary_ops_configs_long,
QUnaryOpBenchmark)
# === Other unary ops (i.e. the ones that need parameters as args) ===
# Configs for the quantized topk benchmark
qunary_ops_topk_configs_short = op_bench.config_list(
attr_names=['M', 'N', 'k'],
attrs=[
[512, 512, 5],
],
cross_product_configs={
'dtype': [torch.quint8],
},
tags=['short']
)
qunary_ops_topk_configs_long = op_bench.cross_product_configs(
M=[256, 1024],
N=[256, 1024],
k=[1, 3, 5],
dtype=[torch.quint8, torch.qint8, torch.qint32],
tags=['long']
)
class QTopkOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, k):
f_input = torch.rand(M, N)
scale = 1.0
zero_point = 0
self.inputs = {
"q_input": torch.quantize_per_tensor(f_input, scale=scale,
zero_point=zero_point,
dtype=dtype),
"k": k
}
self.set_module_name('qtopk')
def forward(self, q_input, k: int):
return torch.topk(q_input, k)
op_bench.generate_pt_test(qunary_ops_topk_configs_short + qunary_ops_topk_configs_long,
QTopkOpBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qunary_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for interpolate operator."""
class InterpolateBenchmark(op_bench.TorchBenchmarkBase):
def init(self, input_size, output_size, channels_last=False, mode='linear', dtype=torch.float):
input_image = torch.randint(0, 256, size=input_size, dtype=dtype, device='cpu',
requires_grad=self.auto_set())
if channels_last:
if input_image.ndim == 4:
input_image = input_image.contiguous(memory_format=torch.channels_last)
elif input_image.ndim == 5:
input_image = input_image.contiguous(memory_format=torch.channels_last_3d)
else:
raise ValueError(
f"Can not set channels_last to the input of {input_image.ndim} dims"
)
align_corners = None if mode == "nearest" else False
if mode == "linear":
mode = {
3: 'linear',
4: 'bilinear',
5: 'trilinear',
}[input_image.ndim]
self.inputs = {
"input_image": input_image,
"output_size": output_size,
"mode": mode,
"align_corners": align_corners,
}
self.set_module_name("interpolate")
def forward(self, input_image, output_size, mode, align_corners):
return torch.nn.functional.interpolate(input_image, size=output_size, mode=mode,
align_corners=align_corners)
config_short = op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 60, 40), (24, 24)],
[(1, 3, 600, 400), (240, 240)],
[(1, 3, 320, 320), (256, 256)],
],
cross_product_configs={
'channels_last': [True, False],
'mode': ["nearest", "linear", "bicubic"],
},
tags=["short"],
)
config_short += op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 60, 40), (24, 24)],
[(1, 3, 600, 400), (240, 240)],
[(1, 3, 320, 320), (256, 256)],
],
cross_product_configs={
'channels_last': [True, False],
'mode': ["nearest", ],
'dtype': [torch.uint8, ],
},
tags=["short"],
)
config_long = op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 320, 320), (512, 512)],
[(1, 3, 500, 500), (256, 256)],
[(1, 3, 500, 500), (800, 800)],
# vectorization test-case
[(2, 128, 64, 46), (128, 128)],
[(2, 128, 64, 46), (32, 24)],
],
cross_product_configs={
'channels_last': [True, False],
'mode': ["nearest", "linear", "bicubic"],
},
tags=["long"],
)
config_3d = op_bench.config_list(
# no channels_last for 3D tensors
attr_names=["input_size", "output_size"],
attrs=[
[(4, 512, 320), (256,)],
[(4, 512, 320), (512,)],
],
cross_product_configs={
'mode': ["nearest", "linear"],
},
tags=["long"],
)
config_5d = op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 16, 320, 320), (8, 256, 256)],
[(1, 3, 16, 320, 320), (32, 512, 512)],
# vectorization test-case
[(1, 16, 32, 64, 64), (16, 32, 32)],
[(1, 16, 32, 64, 64), (64, 128, 128)],
],
cross_product_configs={
'channels_last': [True, False],
'mode': ["nearest", "linear"],
},
tags=["long"],
)
for config in (config_short, config_long, config_3d, config_5d):
op_bench.generate_pt_test(config, InterpolateBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/interpolate_test.py |
import operator_benchmark as op_bench
import torch
"""
Microbenchmarks for the gelu operators.
"""
gelu_configs_long = op_bench.cross_product_configs(
N=[1, 4],
C=[3],
H=[16, 256],
W=[16, 256],
device=['cpu'],
tags=['long']
)
class GeluBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device):
self.inputs = {
"input": torch.rand(N, C, H, W, device=device)
}
def forward(self, input):
return torch.nn.functional.gelu(input)
op_bench.generate_pt_test(gelu_configs_long, GeluBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/gelu_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
class BmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device, op):
self.inputs = {
"batch1": torch.rand((B, M, K), device=device, requires_grad=self.auto_set()),
"batch2": torch.rand((B, K, N,), device=device, requires_grad=self.auto_set())
}
self.set_module_name(f"bmm (actual op={op}")
self.op = torch.bmm if op == "bmm" else torch.matmul
def forward(self, batch1, batch2):
return self.op(batch1, batch2)
bmm_configs = op_bench.cross_product_configs(
B=[2, 100],
M=[8, 256],
N=[256, 16],
K=[16, 32],
device=['cpu'],
tags=["short"],
op=["bmm", "matmul"],
)
op_bench.generate_pt_test(bmm_configs, BmmBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/bmm_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn as nn
from pt import configs
"""Microbenchmarks for Linear operator."""
class LinearBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, IN, OUT, device):
self.inputs = {
"input_one": torch.rand(N, IN, device=device)
}
self.linear = nn.Linear(IN, OUT).to(device=device)
self.set_module_name("linear")
def forward(self, input_one):
return self.linear(input_one)
op_bench.generate_pt_test(configs.linear_configs_short + configs.linear_configs_long,
LinearBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/linear_test.py |
import torch
import torch.nn.quantized as nnq
import operator_benchmark as op_bench
r"""Microbenchmarks for the quantized activations."""
qactivation_long_configs = op_bench.cross_product_configs(
dims=(
# VGG-16 relu's with original shape: (-1, 3, 224, 224)
( 64, 224, 224), # ReLU-1 # noqa: E201
(128, 112, 112), # ReLU-6
(256, 56, 56), # ReLU-11 # noqa: E241
(512, 28, 28), # ReLU-18 # noqa: E241
(512, 14, 14), # ReLU-25 # noqa: E241
# Batch = 16
(16, 64, 224, 224), # ReLU-1 # noqa: E241
(16, 128, 112, 112), # ReLU-6
(16, 256, 56, 56), # ReLU-11 # noqa: E241
(16, 512, 28, 28), # ReLU-18 # noqa: E241
(16, 512, 14, 14), # ReLU-25 # noqa: E241
),
contig=(False, True),
inplace=(False, True),
dtype=(torch.quint8,),
tags=('long',)
)
qactivation_short_configs = op_bench.cross_product_configs(
dims=(
(3, 4, 5), # Rank=3
(2, 3, 4, 5), # Rank=4,
# Dimensions from the floating point benchmarks
(512, 512),
(256, 1024),
),
contig=(False,),
inplace=(False,),
dtype=(torch.quint8, torch.qint8, torch.qint32),
tags=('short',)
)
qactivation_ops = op_bench.op_list(
attrs=(
('relu', torch.nn.ReLU()),
('relu6', torch.ops.quantized.relu6),
('functional.hardtanh', nnq.functional.hardtanh),
('functional.hardsigmoid', nnq.functional.hardsigmoid),
('functional.leaky_relu', nnq.functional.leaky_relu),
('functional.sigmoid', torch.nn.functional.sigmoid),
('functional.tanh', torch.nn.functional.tanh),
),
attr_names=('op_name', 'op_func'),
)
class QActivationBenchmarkBase(op_bench.TorchBenchmarkBase):
r"""Base class for all the activations."""
def _setup(self, dims, contig, dtype):
# Input
f_input = (torch.rand(*dims) - 0.5) * 256
self.scale = 1.0
self.zero_point = 0
# Quantize the tensor
q_input = torch.quantize_per_tensor(f_input, scale=self.scale,
zero_point=self.zero_point,
dtype=dtype)
if not contig:
# Make non-contiguous
new_shape = list(range(q_input.ndim))[::-1]
q_input = q_input.permute(new_shape)
self.inputs = {
"q_input": q_input
}
def init(self, dims, contig, inplace, dtype, op_func):
self._setup(dims, contig, dtype)
self.qop = op_func
class QActivationBenchmark(QActivationBenchmarkBase):
def forward(self, q_input):
return self.qop(q_input)
op_bench.generate_pt_tests_from_op_list(qactivation_ops,
qactivation_short_configs + qactivation_long_configs,
QActivationBenchmark)
qactivation_scale_zero_point_ops = op_bench.op_list(
attrs=(
('functional.hardswish', nnq.functional.hardswish),
('functional.elu', nnq.functional.elu),
('functional.celu', nnq.functional.celu),
),
attr_names=('op_name', 'op_func'),
)
class QActivationScaleZeroPointBenchmark(QActivationBenchmarkBase):
def forward(self, q_input):
return self.qop(q_input, scale=self.scale, zero_point=self.zero_point)
op_bench.generate_pt_tests_from_op_list(qactivation_scale_zero_point_ops,
qactivation_short_configs + qactivation_long_configs,
QActivationScaleZeroPointBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qactivation_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn as nn
from pt import configs
"""
Microbenchmarks for Conv1d and ConvTranspose1d operators.
"""
class Conv1dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, IC, OC, kernel, stride, N, L, device):
self.inputs = {
"input": torch.rand(N, IC, L, device=device, requires_grad=self.auto_set())
}
self.conv1d = nn.Conv1d(IC, OC, kernel, stride=stride).to(device=device)
self.set_module_name('Conv1d')
def forward(self, input):
return self.conv1d(input)
class ConvTranspose1dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, IC, OC, kernel, stride, N, L, device):
self.inputs = {
"input": torch.rand(N, IC, L, device=device)
}
self.convtranspose1d = nn.ConvTranspose1d(IC, OC, kernel, stride=stride).to(device=device)
self.set_module_name('ConvTranspose1d')
def forward(self, input):
return self.convtranspose1d(input)
op_bench.generate_pt_test(configs.conv_1d_configs_short + configs.conv_1d_configs_long,
Conv1dBenchmark)
op_bench.generate_pt_test(configs.conv_1d_configs_short + configs.conv_1d_configs_long,
ConvTranspose1dBenchmark)
"""
Microbenchmarks for Conv2d and ConvTranspose2d operators.
"""
class Conv2dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, IC, OC, kernel, stride, N, H, W, G, pad, device):
self.inputs = {
"input": torch.rand(N, IC, H, W, device=device)
}
self.conv2d = nn.Conv2d(
IC, OC, kernel, stride=stride, groups=G, padding=pad).to(device=device)
self.set_module_name('Conv2d')
def forward(self, input):
return self.conv2d(input)
class ConvTranspose2dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, IC, OC, kernel, stride, N, H, W, G, pad, device):
self.inputs = {
"input": torch.rand(N, IC, H, W, device=device)
}
self.convtranspose2d = nn.ConvTranspose2d(
IC, OC, kernel, stride=stride, groups=G, padding=pad).to(device=device)
self.set_module_name('ConvTranspose2d')
def forward(self, input):
return self.convtranspose2d(input)
op_bench.generate_pt_test(configs.conv_2d_configs_short + configs.conv_2d_configs_long,
Conv2dBenchmark)
op_bench.generate_pt_test(configs.conv_2d_configs_short + configs.conv_2d_configs_long,
ConvTranspose2dBenchmark)
"""
Microbenchmarks for Conv3d and ConvTranspose3d operators.
"""
class Conv3dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, IC, OC, kernel, stride, N, D, H, W, device):
self.inputs = {
"input": torch.rand(N, IC, D, H, W, device=device)
}
self.conv3d = nn.Conv3d(IC, OC, kernel, stride=stride).to(device=device)
self.set_module_name('Conv3d')
def forward(self, input):
return self.conv3d(input)
class ConvTranspose3dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, IC, OC, kernel, stride, N, D, H, W, device):
self.inputs = {
"input": torch.rand(N, IC, D, H, W, device=device)
}
self.convtranspose3d = nn.ConvTranspose3d(IC, OC, kernel, stride=stride).to(device=device)
self.set_module_name('ConvTranspose3d')
def forward(self, input):
return self.convtranspose3d(input)
op_bench.generate_pt_test(configs.conv_3d_configs_short, Conv3dBenchmark)
op_bench.generate_pt_test(configs.conv_3d_configs_short,
ConvTranspose3dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/conv_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for Split operator"""
# Configs for PT Split operator
split_configs_short = op_bench.config_list(
attr_names=["M", "N", "parts"],
attrs=[
[8, 8, 2],
[256, 512, 2],
[512, 512, 2],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"],
)
split_configs_long = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
parts=[2, 4],
device=['cpu', 'cuda'],
tags=['long']
)
class SplitBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, parts, device):
self.inputs = {
"input": torch.rand(M, N, device=device),
"split_size": int(M * N / parts)
}
self.set_module_name('split')
def forward(self, input, split_size: int):
return torch.split(input, split_size)
op_bench.generate_pt_test(split_configs_short + split_configs_long,
SplitBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/split_test.py |
| pytorch-master | benchmarks/operator_benchmark/pt/__init__.py |
import operator_benchmark as op_bench
import torch
import torch.nn.qat as nnqat
import numpy
from pt import configs
from torch.ao.quantization import default_embedding_qat_qconfig
"""
Microbenchmarks for QAT Embedding + EmbeddingBag operators.
"""
class QATEmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
qconfig = default_embedding_qat_qconfig
self.embedding = nnqat.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
include_last_offset=include_last_offset,
sparse=sparse, device=device, qconfig=qconfig)
numpy.random.seed((1 << 32) - 1)
offsets = torch.tensor([offset], dtype=torch.long, device=device)
input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()
self.inputs = {
"input": input,
"offset": torch.cat((offsets, torch.tensor([input.size(0)], dtype=torch.long)), 0)
}
self.set_module_name('qatEmbeddingBag')
def forward(self, input, offset):
return self.embedding(input, offset)
# Currently, EmbeddingBag QAT does not support sparse embeddings.
embeddingbag_short_dense_configs = [config for config in configs.embeddingbag_short_configs
if {'sparse': True} not in config]
op_bench.generate_pt_test(embeddingbag_short_dense_configs, QATEmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(embeddingbag_short_dense_configs, QATEmbeddingBagBenchmark)
class QATEmbeddingBenchmark(op_bench.TorchBenchmarkBase):
def init(self, num_embeddings, embedding_dim, input_size, device):
qconfig = default_embedding_qat_qconfig
self.embedding = nnqat.Embedding(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
qconfig=qconfig, device=device)
self.embedding.qconfig = default_embedding_qat_qconfig
numpy.random.seed((1 << 32) - 1)
self.input = torch.tensor(numpy.random.randint(0, num_embeddings, input_size),
device=device).long()
self.inputs = {"input": self.input}
self.set_module_name('qatEmbedding')
def forward(self, input):
return self.embedding(input)
op_bench.generate_pt_test(configs.embedding_short_configs, QATEmbeddingBenchmark)
op_bench.generate_pt_gradient_test(configs.embedding_short_configs, QATEmbeddingBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qatembedding_ops_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for binary operators."""
# Benchmark ops performance with broadcast
binary_ops_bcast_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['add', torch.add],
],
)
# Configs with broadcast
binary_configs_broadcast = op_bench.config_list(
attr_names=['in_one', 'in_two'],
attrs=[
[[64, 1, 64], [1, 64, 1]],
],
cross_product_configs={
'device': ['cpu'],
'dtype': [torch.float],
},
tags=["short"]
)
class BinaryOpBcastBenchmark(op_bench.TorchBenchmarkBase):
def init(self, in_one, in_two, dtype, device, op_func):
self.inputs = {
"in_one": torch.randn(in_one, device=device).to(dtype=dtype),
"in_two": torch.randn(in_two, device=device).to(dtype=dtype)
}
self.op_func = op_func
def forward(self, in_one, in_two):
return self.op_func(in_one, in_two)
op_bench.generate_pt_tests_from_op_list(binary_ops_bcast_list,
binary_configs_broadcast,
BinaryOpBcastBenchmark)
def copy(in1, in2):
return in1.copy_(in2)
# Benchmark ops performance without broadcast
binary_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['add', torch.add],
['copy_', copy],
],
)
binary_short_configs = op_bench.config_list(
attr_names=['M', 'N', 'K'],
attrs=[
[1, 1, 1],
[64, 64, 64],
[64, 64, 128],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
'dtype_one' : [torch.int32],
'dtype_two' : [torch.int32],
},
tags=['short'],
)
binary_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
K=[256, 512],
device=['cpu', 'cuda'],
dtype_one=[torch.int8, torch.int32],
dtype_two=[torch.int8, torch.int32],
tags=['long']
)
class BinaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype_one, dtype_two, op_func):
self.inputs = {
"input_one": torch.randn(M, N, K, device=device).to(dtype=dtype_one),
"input_two": torch.randn(M, N, K, device=device).to(dtype=dtype_two)
}
self.op_func = op_func
def forward(self, input_one, input_two):
return self.op_func(input_one, input_two)
op_bench.generate_pt_tests_from_op_list(binary_ops_list,
binary_short_configs + binary_long_configs,
BinaryOpBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/binary_test.py |
import operator_benchmark as op_bench
import torch
import random
from typing import List
"""Microbenchmarks for Cat operator"""
cross_product_configs = {
'device': ['cpu', 'cuda'],
}
# Configs for PT Cat operator
cat_configs_short = op_bench.config_list(
attr_names=['sizes', 'N', 'dim'],
attrs=[
[(1, 1, 1), 2, 0], # noqa: E241
[(512, 512, 2), 2, 1], # noqa: E241
[(128, 1024, 2), 2, 1], # noqa: E241
],
cross_product_configs=cross_product_configs,
tags=['short'],
)
# Configs specific to the static runtime feature - a fast-path runtime for pared-down models
cat_configs_static_runtime = op_bench.config_list(
attr_names=['sizes', 'N', 'dim'],
attrs=[
[[(1, 160), (1, 14)], -1, 1],
[[(1, 20, 40), (1, 4, 40), (1, 5, 40)], -1, 1],
[[(1, 580), (1, 174)], -1, 1],
[[(20, 160), (20, 14)], -1, 1],
[[(20, 20, 40), (20, 4, 40), (20, 5, 40)], -1, 1],
[[(20, 580), (20, 174)], -1, 1],
],
cross_product_configs=cross_product_configs,
tags=['static_runtime'],
)
cat_configs_long = op_bench.config_list(
attr_names=['sizes', 'N', 'dim'],
attrs=[
[(2**10, 2**10, 2), 2, 0], # noqa: E241
[(2**10+1, 2**10-1, 2), 2, 1], # noqa: E226,E241
[(2**10, 2**10, 2), 2, 2], # noqa: E241
[[ lambda: random.randint(2**6, 2**7), 2**7-17, 2**6+1], # noqa: E201,E226,E241
5, 0],
[[ 2**6+2**5, lambda: random.randint(2**6, 2**7), 2**6], # noqa: E201,E226,E241,E272
5, 1],
[[ 2**7, 2**6, lambda: random.randint(2**6, 2**7)], # noqa: E201,E241,E272
5, 2],
[[lambda: random.randint(2**5, 2**6), 2**5, 2**6], # noqa: E241
50, 0],
[[2**5, lambda: random.randint(2**5, 2**6), 2**6], # noqa: E241,E272
50, 1],
[[2**5+1, 2**6+1, lambda: random.randint(2**5, 2**6)], # noqa: E226,E241,E272
50, 2],
],
cross_product_configs=cross_product_configs,
tags=['long'],
)
# There is a different codepath on CUDA for >4 dimensions
cat_configs_multidim = op_bench.config_list(
attr_names=['sizes', 'N', 'dim'],
attrs=[
[(2**6, 2**5, 2**2, 2**4, 2**5), 2, 2], # noqa: E241
[(2**4, 2**5, 2**2, 2**4, 2**5), 8, 2], # noqa: E241
[(2**3+1, 2**5-1, 2**2+1, 2**4-1, 2**5+1), 17, 4], # noqa: E226,E241
],
cross_product_configs=cross_product_configs,
tags=['multidim'],
)
cat_configs_manyinputs = op_bench.config_list(
attr_names=['sizes', 'N', 'dim'],
attrs=[
[[lambda: random.randint(1, 10000)], 100, 0],
[[lambda: random.randint(1, 1000)], 1000, 0],
[[lambda: random.randint(1, 500)], 2000, 0],
[[lambda: random.randint(1, 300)], 3000, 0],
],
cross_product_configs=cross_product_configs,
tags=['manyinputs'],
)
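# Some size entries above are lambdas; init() evaluates them once per generated
# input, so the N tensors can differ in length along the concat dimension
# (random.seed(42) keeps the draws reproducible).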
class CatBenchmark(op_bench.TorchBenchmarkBase):
def init(self, sizes, N, dim, device):
random.seed(42)
inputs = []
gen_sizes = []
if type(sizes) == list and N == -1:
gen_sizes = sizes
else:
for i in range(N):
gen_sizes.append([old_size() if callable(old_size) else old_size for old_size in sizes])
for s in gen_sizes:
inputs.append(torch.rand(s, device=device))
result = torch.empty(0, device=device)
self.inputs = {
"result": result,
"inputs": inputs,
"dim": dim
}
self.set_module_name('cat')
def forward(self, result: torch.Tensor, inputs: List[torch.Tensor], dim: int):
return torch.cat(inputs, dim=dim, out=result)
op_bench.generate_pt_test(cat_configs_short +
cat_configs_long +
cat_configs_multidim +
cat_configs_manyinputs +
cat_configs_static_runtime,
CatBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/cat_test.py |
import operator_benchmark as op_bench
import torch
import random
from typing import List
"""Microbenchmarks for Stack operator"""
# Configs for PT stack operator
stack_configs_static_runtime = op_bench.config_list(
attr_names=['sizes', 'N'],
attrs=[
[(20, 40), 5],
[(1, 40), 5],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
'dim': list(range(3))
},
tags=['static_runtime'],
)
stack_configs_short = op_bench.config_list(
attr_names=['sizes', 'N'],
attrs=[
[(1, 1, 1), 2], # noqa: E241
[(512, 512, 2), 2], # noqa: E241
[(128, 1024, 2), 2], # noqa: E241
],
cross_product_configs={
'device': ['cpu', 'cuda'],
'dim': list(range(4))
},
tags=['short'],
)
stack_configs_long = op_bench.config_list(
attr_names=['sizes', 'N'],
attrs=[
[(2**10, 2**10, 2), 2], # noqa: E241
[(2**10+1, 2**10-1, 2), 2], # noqa: E226,E241
[(2**10, 2**10, 2), 2], # noqa: E241
],
cross_product_configs={
'device': ['cpu', 'cuda'],
'dim': list(range(4))
},
tags=['long'],
)
# There is a different codepath on CUDA for >4 dimensions
stack_configs_multidim = op_bench.config_list(
attr_names=['sizes', 'N'],
attrs=[
[(2**6, 2**5, 2**2, 2**4, 2**5), 2], # noqa: E241
[(2**4, 2**5, 2**2, 2**4, 2**5), 8], # noqa: E241
[(2**3+1, 2**5-1, 2**2+1, 2**4-1, 2**5+1), 17], # noqa: E226,E241
],
cross_product_configs={
'device': ['cpu', 'cuda'],
'dim': list(range(6))
},
tags=['multidim'],
)
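# The size-generation logic mirrors cat_test.py; note that torch.stack requires
# every input to have an identical shape, so these configs use fixed sizes only.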
class StackBenchmark(op_bench.TorchBenchmarkBase):
def init(self, sizes, N, dim, device):
random.seed(42)
inputs = []
gen_sizes = []
if type(sizes) == list and N == -1:
gen_sizes = sizes
else:
for i in range(N):
gen_sizes.append([old_size() if callable(old_size) else old_size for old_size in sizes])
for s in gen_sizes:
inputs.append(torch.rand(s, device=device))
result = torch.rand(gen_sizes[0], device=device)
self.inputs = {
"result": result,
"inputs": inputs,
"dim": dim
}
self.set_module_name('stack')
def forward(self, result: torch.Tensor, inputs: List[torch.Tensor], dim: int):
return torch.stack(inputs, dim=dim, out=result)
op_bench.generate_pt_test(stack_configs_static_runtime +
stack_configs_short +
stack_configs_long +
stack_configs_multidim,
StackBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/stack_test.py |
import operator_benchmark as op_bench
import torch
"""
Microbenchmarks for batch matrix mult with einsum and torch.bmm.
"""
batch_mm_configs_short = op_bench.config_list(
attr_names=["B", "M", "N", "K"],
attrs=[
[4, 5, 3, 2],
[32, 25, 20, 30],
[128, 100, 120, 110],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"],
)
batch_mm_configs_long = op_bench.config_list(
attr_names=["B", "M", "N", "K"],
attrs=[
[128, 256, 128, 256],
[512, 1024, 1024, 512],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["long"],
)
batch_mm_op_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['einsum_bmm', torch.einsum],
['bmm', torch.bmm],
],
)
class BatchMatrixMultBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device, op_func):
self.inputs = {
"input_one": torch.rand(B, M, N, device=device),
"input_two": torch.rand(B, N, K, device=device)
}
self.op_func = op_func
def forward(self, input_one, input_two):
if self.op_func.__name__ == "einsum":
return torch.einsum('bij,bjk->bik', input_one, input_two)
else:
return torch.bmm(input_one, input_two)
"""
Microbenchmarks for element-wise matrix mult with einsum and torch.mul.
"""
batch_elementwise_configs_short = op_bench.config_list(
attr_names=["B", "M", "N"],
attrs=[
[4, 5, 3],
[32, 25, 20],
[100, 90, 110],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"],
)
batch_elementwise_configs_long = op_bench.cross_product_configs(
B=[128, 512, 1024],
M=[128, 512, 1024],
N=[128, 512, 1024],
device=['cpu', 'cuda'],
tags=['long']
)
batch_elementwise_op_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['einsum_elementwise', torch.einsum],
['mul', torch.mul],
],
)
class BatchElementWiseBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, device, op_func):
self.inputs = {
"input_one": torch.rand(B, M, N, device=device),
"input_two": torch.rand(B, M, N, device=device)
}
self.op_func = op_func
def forward(self, input_one, input_two):
if self.op_func.__name__ == "einsum":
return torch.einsum('bij,bij->bij', input_one, input_two)
else:
return torch.mul(input_one, input_two)
op_bench.generate_pt_tests_from_op_list(
batch_mm_op_list,
batch_mm_configs_short + batch_mm_configs_long,
BatchMatrixMultBenchmark,
)
op_bench.generate_pt_tests_from_op_list(
batch_elementwise_op_list,
batch_elementwise_configs_short + batch_elementwise_configs_long,
BatchElementWiseBenchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/matrix_mult_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for groupnorm operator."""
groupnorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
),
num_groups=(2, 4),
tags=["short"],
)
class GroupNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, num_groups):
num_channels = dims[1]
self.inputs = {
"input": (torch.rand(*dims) - 0.5) * 256,
"num_groups": num_groups,
"weight": torch.rand(num_channels, dtype=torch.float),
"bias": torch.rand(num_channels, dtype=torch.float),
"eps": 1e-5
}
def forward(self, input, num_groups: int, weight, bias, eps: float):
return F.group_norm(
input, num_groups, weight=weight, bias=bias, eps=eps)
op_bench.generate_pt_test(groupnorm_configs_short, GroupNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/groupnorm_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
dims=(
(1, 8, 16),
(8, 8, 16),
(32, 8, 16),
(64, 128, 56, 56),
),
tags=["short"],
)
class LayerNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims):
input = (torch.rand(*dims) - 0.5) * 256
self.inputs = {
"input": input,
"weight": torch.rand(*input.size()[1:], dtype=torch.float),
"bias": torch.rand(*input.size()[1:], dtype=torch.float),
"eps": 1e-5
}
def forward(self, input, weight, bias, eps: float):
return F.layer_norm(
input, input.size()[1:], weight=weight, bias=bias, eps=eps)
op_bench.generate_pt_test(layernorm_configs_short, LayerNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/layernorm_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for the hardsigmoid operator.
"""
# Configs for hardsigmoid ops
hardsigmoid_configs_short = op_bench.config_list(
attr_names=[
'N', 'C', 'H', 'W'
],
attrs=[
[1, 3, 256, 256],
[4, 3, 256, 256],
],
cross_product_configs={
'device': ['cpu'],
},
tags=['short']
)
hardsigmoid_configs_long = op_bench.cross_product_configs(
N=[8, 16],
C=[3],
H=[256, 512],
W=[256, 512],
device=['cpu'],
tags=['long']
)
hardsigmoid_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['Hardsigmoid', nn.Hardsigmoid],
],
)
class HardsigmoidBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device, op_func):
self.inputs = {
"input_one": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
def forward(self, input_one):
return self.op_func(input_one)
op_bench.generate_pt_tests_from_op_list(hardsigmoid_ops_list,
hardsigmoid_configs_short + hardsigmoid_configs_long,
HardsigmoidBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/hardsigmoid_test.py |
import operator_benchmark as op_bench
import torch
# Configs for quantized tensor method benchmarks
qmethods_configs_short = op_bench.config_list(
attr_names=['M', 'N'],
attrs=[
[32, 32],
],
cross_product_configs={
'dtype': [torch.quint8],
'contig': [False, True],
},
tags=['short']
)
qmethods_configs_long = op_bench.cross_product_configs(
M=[256, 1024],
N=[256, 1024],
dtype=[torch.qint8, torch.qint32],
contig=[False, True],
tags=['long']
)
class _QMethodBenchmarkBase(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, contig):
f_input = torch.rand(M, N)
scale = 1.0
zero_point = 0
self.q_input = torch.quantize_per_tensor(f_input, scale=scale,
zero_point=zero_point,
dtype=dtype)
if not contig:
permute_dims = list(range(self.q_input.ndim))[::-1]
self.q_input = self.q_input.permute(permute_dims)
self.inputs = {
"q_input": self.q_input,
}
class QMethodTensorInputCopyBenchmark(_QMethodBenchmarkBase):
def forward(self, q_input):
return q_input.copy_(q_input)
op_bench.generate_pt_test(
qmethods_configs_short + qmethods_configs_long,
QMethodTensorInputCopyBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qtensor_method_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for Chunk operator"""
# Configs for PT Chunk operator
chunk_short_configs = op_bench.config_list(
attr_names=["M", "N", "chunks"],
attrs=[
[8, 8, 2],
[256, 512, 2],
[512, 512, 2],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"],
)
chunks_long_configs = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
chunks=[2, 4],
device=['cpu', 'cuda'],
tags=['long']
)
class ChunkBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, chunks, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device),
"chunks": chunks
}
self.set_module_name("chunk")
def forward(self, input_one, chunks: int):
return torch.chunk(input_one, chunks)
op_bench.generate_pt_test(chunk_short_configs + chunks_long_configs,
ChunkBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/chunk_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for sum reduction operator."""
# Configs for PT sum operator
sum_configs = op_bench.cross_product_configs(
R=[64, 256], # Length of reduced dimension
V=[32, 512], # Length of other dimension
dim=[0, 1],
contiguous=[True, False],
device=['cpu', 'cuda'],
tags=['short']
) + op_bench.cross_product_configs(
R=[1024, 8192],
V=[512, 1024],
dim=[0, 1],
contiguous=[True, False],
device=['cpu', 'cuda'],
tags=['long']
)
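# For the non-contiguous case, init() allocates a buffer twice as large in each
# dimension and views every other element, producing a tensor of the requested
# shape whose elements are not laid out contiguously in memory.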
class SumBenchmark(op_bench.TorchBenchmarkBase):
def init(self, R, V, dim, contiguous, device):
shape = (R, V) if dim == 0 else (V, R)
tensor = torch.rand(shape, device=device)
if not contiguous:
storage = torch.empty([s * 2 for s in shape], device=device)
storage[::2, ::2] = tensor
self.input_tensor = storage[::2, ::2]
else:
self.input_tensor = tensor
self.inputs = {
"input_tensor": self.input_tensor,
"dim": dim
}
self.set_module_name("sum")
def forward(self, input_tensor, dim: int):
return input_tensor.sum(dim=dim)
op_bench.generate_pt_test(sum_configs, SumBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/sum_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for the softmax operators.
"""
# Configs for softmax ops
softmax_configs_short = op_bench.config_list(
attr_names=[
'N', 'C', 'H', 'W'
],
attrs=[
[1, 3, 256, 256],
[4, 3, 256, 256],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=['short']
)
softmax_configs_long = op_bench.cross_product_configs(
N=[8, 16],
C=[3],
H=[256, 512],
W=[256, 512],
device=['cpu', 'cuda'],
tags=['long']
)
softmax_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['Softmax', nn.Softmax],
['Softmax2d', nn.Softmax2d],
['LogSoftmax', nn.LogSoftmax],
],
)
class SoftmaxBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device, op_func):
self.inputs = {
"input": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
def forward(self, input):
return self.op_func(input)
op_bench.generate_pt_tests_from_op_list(softmax_ops_list,
softmax_configs_short + softmax_configs_long,
SoftmaxBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/softmax_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for the hardswish operators.
"""
# Configs for hardswish ops
hardswish_configs_short = op_bench.config_list(
attr_names=[
'N', 'C', 'H', 'W'
],
attrs=[
[1, 3, 256, 256],
[4, 3, 256, 256],
],
cross_product_configs={
'device': ['cpu'],
},
tags=['short']
)
hardswish_configs_long = op_bench.cross_product_configs(
N=[8, 16],
C=[3],
H=[256, 512],
W=[256, 512],
device=['cpu'],
tags=['long']
)
hardswish_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['Hardswish', nn.Hardswish],
],
)
class HardswishBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device, op_func):
self.inputs = {
"input_one": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
def forward(self, input_one):
return self.op_func(input_one)
op_bench.generate_pt_tests_from_op_list(hardswish_ops_list,
hardswish_configs_short + hardswish_configs_long,
HardswishBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/hardswish_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for batchnorm operator."""
# Benchmark cudnn if available
if torch.backends.cudnn.is_available():
def cudnn_benchmark_configs(configs):
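# With cuDNN available, duplicate every CUDA config into cudnn=True and cudnn=False variants so both code paths are benchmarked; CPU configs only run the native (cudnn=False) path.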
result = []
for config in configs:
is_cuda = any('cuda' in attr.values() for attr in config)
if is_cuda:
result.append((*config, dict(cudnn=True)))
result.append((*config, dict(cudnn=False)))
return result
else:
def cudnn_benchmark_configs(configs):
return [(*config, dict(cudnn=False)) for config in configs]
batchnorm_configs_short = cudnn_benchmark_configs(op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 256, 3136],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
'training': [True, False],
},
tags=["short"]
))
batchnorm_configs_long = cudnn_benchmark_configs(op_bench.cross_product_configs(
M=[2, 128],
N=[8192, 2048],
K=[1],
device=['cpu', 'cuda'],
training=[True, False],
tags=["long"]
))
class BatchNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, training, cudnn):
self.inputs = {
"input_one": torch.rand(M, N, K, device=device, requires_grad=self.auto_set()),
"mean": torch.rand(N, device=device),
"var": torch.rand(N, device=device),
"weight": torch.rand(N, device=device),
"bias": torch.rand(N, device=device),
"training": training,
"cudnn": cudnn,
}
self.set_module_name("batchnorm")
def forward(self, input_one, mean, var, weight, bias, training, cudnn):
with torch.backends.cudnn.flags(enabled=cudnn):
return F.batch_norm(input_one, mean, var, weight, bias, training)
op_bench.generate_pt_test(batchnorm_configs_short + batchnorm_configs_long, BatchNormBenchmark)
op_bench.generate_pt_gradient_test(batchnorm_configs_short + batchnorm_configs_long, BatchNormBenchmark)
batchnorm1d_configs_short = cudnn_benchmark_configs(op_bench.config_list(
attr_names=["N", "C"],
attrs=[
[3136, 256],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
'training': [True, False],
},
tags=["short"]
))
batchnorm1d_configs_long = cudnn_benchmark_configs(op_bench.cross_product_configs(
N=[2, 128],
C=[8192, 2048],
device=['cpu', 'cuda'],
training=[True, False],
tags=["long"]
))
class BatchNorm1dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, device, training, cudnn):
self.inputs = {
"input_one": torch.rand(N, C, device=device, requires_grad=self.auto_set()),
"mean": torch.rand(C, device=device),
"var": torch.rand(C, device=device),
"weight": torch.rand(C, device=device),
"bias": torch.rand(C, device=device),
"training": training,
"cudnn": cudnn,
}
self.set_module_name("batchnorm")
def forward(self, input_one, mean, var, weight, bias, training, cudnn):
with torch.backends.cudnn.flags(enabled=cudnn):
return F.batch_norm(input_one, mean, var, weight, bias, training)
op_bench.generate_pt_test(batchnorm1d_configs_short + batchnorm1d_configs_long, BatchNorm1dBenchmark)
op_bench.generate_pt_gradient_test(batchnorm1d_configs_short + batchnorm1d_configs_long, BatchNorm1dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/batchnorm_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for channel_shuffle operator."""
# Configs for PT channel_shuffle operator
channel_shuffle_long_configs = op_bench.cross_product_configs(
batch_size=[4, 8],
channels_per_group=[32, 64],
height=[32, 64],
width=[32, 64],
groups=[4, 8],
channel_last=[True, False],
tags=["long"]
)
channel_shuffle_short_configs = op_bench.config_list(
attr_names=["batch_size", "channels_per_group", "height", "width", "groups"],
attrs=[
[2, 16, 16, 16, 2],
[2, 32, 32, 32, 2],
[4, 32, 32, 32, 4],
[4, 64, 64, 64, 4],
[8, 64, 64, 64, 8],
[16, 64, 64, 64, 16],
],
cross_product_configs={
"channel_last": [True, False],
},
tags=["short"]
)
class ChannelShuffleBenchmark(op_bench.TorchBenchmarkBase):
def init(self, batch_size, channels_per_group, height, width, groups, channel_last):
channels = channels_per_group * groups
data_shape = (batch_size, channels, height, width)
input_data = torch.rand(data_shape)
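# channels_last re-strides the tensor to NHWC memory layout while keeping the logical NCHW shape.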
if channel_last:
input_data = input_data.contiguous(memory_format=torch.channels_last)
self.inputs = {
"input_data": input_data,
"groups": groups
}
self.set_module_name('channel_shuffle')
def forward(self, input_data, groups: int):
return torch.channel_shuffle(input_data, groups)
op_bench.generate_pt_test(channel_shuffle_short_configs + channel_shuffle_long_configs,
ChannelShuffleBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/channel_shuffle_test.py |
import operator_benchmark as op_bench
import torch
import math
"""Microbenchmarks for torch.nan_to_num / nan_to_num_ operators"""
# Configs for PT torch.nan_to_num / nan_to_num_ operators
nan_to_num_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['nan_to_num', torch.nan_to_num],
['nan_to_num_', torch.nan_to_num_],
],
)
nan_to_num_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128],
N=range(32, 128, 32),
dtype=[torch.float, torch.double],
replace_inf=[True, False],
tags=["long"],
)
nan_to_num_short_configs = op_bench.cross_product_configs(
M=[16, 64],
N=[64, 64],
dtype=[torch.float, torch.double],
replace_inf=[True, False],
tags=["short"],
)
class ReplaceNaNBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, replace_inf, op_func):
input = torch.randn(M, N, dtype=dtype)
input[0][0] = float("nan")
self.inputs = {
"input": input,
"replace_inf": replace_inf
}
self.op_func = op_func
self.set_module_name("nan_to_num")
def forward(self, input, replace_inf: bool):
# replace_inf=True relies on the default posinf/neginf substitution; False explicitly keeps infinities in place
if replace_inf:
return self.op_func(input, nan=1.0)
else:
return self.op_func(input, nan=1.0, posinf=math.inf, neginf=-math.inf)
op_bench.generate_pt_tests_from_op_list(
nan_to_num_ops_list,
nan_to_num_long_configs + nan_to_num_short_configs,
ReplaceNaNBenchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/nan_to_num_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
),
dtype=(torch.qint8,),
tags=["short"],
)
class QInstanceNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
zero_point = 0
self.inputs = {
"qX": torch.quantize_per_tensor(
X, scale=scale, zero_point=zero_point, dtype=dtype),
"weight": torch.rand(num_channels, dtype=torch.float),
"bias": torch.rand(num_channels, dtype=torch.float),
"eps": 1e-5,
"Y_scale": 0.1,
"Y_zero_point": 0
}
def forward(self, qX, weight, bias, eps: float, Y_scale: float, Y_zero_point: int):
return torch.ops.quantized.instance_norm(
qX, weight=weight, bias=bias,
eps=eps, output_scale=Y_scale,
output_zero_point=Y_zero_point)
op_bench.generate_pt_test(instancenorm_configs_short, QInstanceNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qinstancenorm_test.py |
import operator_benchmark as op_bench
import torch
'''Microbenchmarks for the quantized interpolate op.
Note: We are not benchmarking `upsample` as it is being deprecated and it calls
`interpolate` internally anyway.
'''
qinterpolate_long_configs = op_bench.config_list(
attr_names=['M', 'N', 'K'],
attrs=[
[512, 512, 512],
],
cross_product_configs={
'dtype': [torch.quint8, torch.qint8, torch.qint32],
'mode': ['nearest', 'bilinear'],
'scale': [0.5, 1.0, 2.0],
'contig': [True], # TODO: Add `False` after #29435
},
tags=['long']
)
qinterpolate_short_configs = op_bench.config_list(
attr_names=['M', 'N', 'K', 'dtype', 'mode', 'scale', 'contig'],
attrs=[
[32, 32, 32, torch.quint8, 'nearest', 0.5, True], # Downsample
[32, 32, 32, torch.quint8, 'bilinear', 0.5, True], # Downsample
[32, 32, 32, torch.quint8, 'nearest', 2.0, True], # Upsample
[32, 32, 32, torch.quint8, 'bilinear', 2.0, True], # Upsample
[3, 720, 1280, torch.quint8, 'bilinear', 0.83333, True], # Downsample
],
tags=['short'],
)
class QInterpolateBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, dtype, mode, scale, contig):
f_input = (torch.rand(1, M, N, K) - 0.5) * 256
q_scale = 0.1  # quantization scale; distinct from the `scale` config used as scale_factor below
zero_point = 42
self.q_input = torch.quantize_per_tensor(f_input, scale=q_scale,
zero_point=zero_point,
dtype=dtype)
if not contig:
permute_dims = list(range(self.q_input.ndim))[::-1]
self.q_input = self.q_input.permute(permute_dims)
self.inputs = {
"q_input": self.q_input,
"scale_factor": scale,
"mode": mode
}
self.set_module_name('q_interpolate')
def forward(self, q_input, scale_factor: float, mode: str):
return torch.nn.functional.interpolate(
q_input, scale_factor=scale_factor, mode=mode)
op_bench.generate_pt_test(qinterpolate_short_configs + qinterpolate_long_configs,
QInterpolateBenchmark)
if __name__ == '__main__':
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qinterpolate_test.py |
import torch
from torch._ops import ops
import operator_benchmark as op_bench
qarithmetic_binary_configs = op_bench.cross_product_configs(
N=(2, 8, 64, 512),
dtype=(torch.quint8, torch.qint8, torch.qint32),
contig=(False, True),
tags=('short',)
)
qarithmetic_binary_ops = op_bench.op_list(
attrs=(
('add', ops.quantized.add),
('add_relu', ops.quantized.add_relu),
('mul', ops.quantized.mul),
),
attr_names=('op_name', 'op_func'),
)
qarithmetic_binary_scalar_ops = op_bench.op_list(
attrs=(
('add_scalar', ops.quantized.add_scalar),
('mul_scalar', ops.quantized.mul_scalar),
),
attr_names=('op_name', 'op_func'),
)
class _QFunctionalBinaryArithmeticBenchmarkBase(op_bench.TorchBenchmarkBase):
def setup(self, N, dtype, contig):
self.qfunctional = torch.nn.quantized.QFunctional()
# TODO: Consider more diverse shapes
f_input = (torch.rand(N, N) - 0.5) * 256
self.scale = 1.0
self.zero_point = 0
self.q_input_a = torch.quantize_per_tensor(f_input, scale=self.scale,
zero_point=self.zero_point,
dtype=dtype)
if not contig:
permute_dims = list(range(f_input.ndim))[::-1]
self.q_input_a = self.q_input_a.permute(permute_dims)
class QFunctionalBenchmark(_QFunctionalBinaryArithmeticBenchmarkBase):
def init(self, N, dtype, contig, op_func):
super(QFunctionalBenchmark, self).setup(N, dtype, contig)
self.inputs = {
"q_input_a": self.q_input_a,
"q_input_b": self.q_input_a,
"scale": self.scale,
"zero_point": self.zero_point
}
self.op_func = op_func
def forward(self, q_input_a, q_input_b, scale: float, zero_point: int):
return self.op_func(q_input_a, q_input_b, scale=scale, zero_point=zero_point)
op_bench.generate_pt_tests_from_op_list(qarithmetic_binary_ops,
qarithmetic_binary_configs,
QFunctionalBenchmark)
class QFunctionalScalarBenchmark(_QFunctionalBinaryArithmeticBenchmarkBase):
def init(self, N, dtype, contig, op_func):
super(QFunctionalScalarBenchmark, self).setup(N, dtype, contig)
self.inputs = {
"q_input": self.q_input_a,
"scalar_input": 42
}
self.op_func = op_func
def forward(self, q_input, scalar_input: int):
return self.op_func(q_input, scalar_input)
op_bench.generate_pt_tests_from_op_list(qarithmetic_binary_scalar_ops,
qarithmetic_binary_configs,
QFunctionalScalarBenchmark)
if __name__ == '__main__':
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qarithmetic_test.py |
import operator_benchmark as op_bench
import torch
from torch import nn
"""
Microbenchmarks for RNNs.
"""
qrnn_configs = op_bench.config_list(
attrs=[
[1, 3, 1],
[5, 7, 4],
],
# names: input_size, hidden_size, num_layers
attr_names=["I", "H", "NL"],
cross_product_configs={
"B": (True,), # Bias always True for quantized
"D": (False, True), # Bidirectional
"dtype": (torch.qint8,) # Only qint8 dtype works for now
},
tags=["short"]
)
class LSTMBenchmark(op_bench.TorchBenchmarkBase):
def init(self, I, H, NL, B, D, dtype):
sequence_len = 128
batch_size = 16
# The quantized.dynamic.LSTM has a bug. That's why we create a regular
# LSTM, and quantize it later. See issue #31192.
scale = 1.0 / 256
zero_point = 0
cell_nn = nn.LSTM(
input_size=I,
hidden_size=H,
num_layers=NL,
bias=B,
batch_first=False,
dropout=0.0,
bidirectional=D,
)
cell_temp = nn.Sequential(cell_nn)
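# Wrapping the LSTM in a Sequential lets us index into the converted container ([0]) to pull out the dynamically quantized module.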
self.cell = torch.ao.quantization.quantize_dynamic(cell_temp,
{nn.LSTM, nn.Linear},
dtype=dtype)[0]
x = torch.randn(sequence_len, # sequence length
batch_size, # batch size
I) # Number of features in X
h = torch.randn(NL * (D + 1), # layer_num * dir_num
batch_size, # batch size
H) # hidden size
c = torch.randn(NL * (D + 1), # layer_num * dir_num
batch_size, # batch size
H) # hidden size
self.inputs = {
"x": x,
"h": h,
"c": c
}
self.set_module_name("QLSTM")
def forward(self, x, h, c):
return self.cell(x, (h, c))[0]
op_bench.generate_pt_test(qrnn_configs, LSTMBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qrnn_test.py |
import operator_benchmark as op_bench
import torch
tensor_conversion_short_configs = op_bench.cross_product_configs(
M=(8, 16, 32,),
N=(16, 64, 128,),
device=['cpu', 'cuda'],
tags=['short'],
)
tensor_conversion_long_configs = op_bench.cross_product_configs(
M=(64, 128, 256, 512,),
N=(256, 512, 1024, 2048,),
device=['cpu', 'cuda'],
tags=['long'],
)
class FloatToHalfTensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device):
self.inputs = {
"input": torch.rand(M, N, device=device, requires_grad=False, dtype=torch.float)
}
def forward(self, input):
return input.to(torch.half)
class HalfToFloatTensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device):
self.inputs = {
"input": torch.rand(M, N, device=device, requires_grad=False, dtype=torch.half)
}
def forward(self, input):
return input.to(torch.float)
op_bench.generate_pt_test(tensor_conversion_short_configs, FloatToHalfTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, FloatToHalfTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_short_configs, HalfToFloatTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, HalfToFloatTensorConversionBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/tensor_to_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
from pt import configs
"""
Microbenchmarks for Quantized Linear operators.
"""
class _QLinearBenchmarkBase(op_bench.TorchBenchmarkBase):
def init(self, N, IN, OUT, linear_under_test):
scale = torch.tensor(1.0 / 255)
zero_point = torch.tensor(0)
self.X = torch.randn(N, IN, dtype=torch.float32)
self.qX = torch.quantize_per_tensor(self.X, scale=scale, zero_point=zero_point, dtype=torch.quint8)
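# Both inputs are prepared here: the static QLinear child benchmarks the pre-quantized qX, while the dynamic child feeds the float X and lets the op quantize activations on the fly.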
W = torch.randn(OUT, IN, dtype=torch.float32)
qW = torch.quantize_per_tensor(W, scale=scale, zero_point=0, dtype=torch.qint8)
# `linear_under_test` is the quantized Linear module (static or dynamic) supplied by the child class
self.qlinear = linear_under_test
self.qlinear.weight = qW
self.qlinear.scale = scale
self.qlinear.zero_point = zero_point
def forward(self, input):
# The child class decides whether `input` is the float X or the quantized qX
return self.qlinear(input)
class QLinearBenchmark(_QLinearBenchmarkBase):
def init(self, N, IN, OUT, device):
super(QLinearBenchmark, self).init(N, IN, OUT, nnq.Linear(IN, OUT))
self.inputs = {
"input": self.qX
}
self.set_module_name("QLinear")
class QDynamicLinearBenchmark(_QLinearBenchmarkBase):
def init(self, N, IN, OUT, device):
super(QDynamicLinearBenchmark, self).init(N, IN, OUT, nnqd.Linear(IN, OUT))
self.inputs = {
"input": self.X
}
self.set_module_name("QDynamicLinear")
op_bench.generate_pt_test(configs.remove_cuda(configs.linear_configs_short + configs.linear_configs_long), QLinearBenchmark)
op_bench.generate_pt_test(configs.remove_cuda(configs.linear_configs_short + configs.linear_configs_long), QDynamicLinearBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qlinear_test.py |
import operator_benchmark as op_bench
import torch
embeddingbag_conversion_short_configs = op_bench.cross_product_configs(
num_embeddings=(80,),
embedding_dim=(128, 256, 512),
tags=('short',)
)
embeddingbag_conversion_long_configs = op_bench.cross_product_configs(
num_embeddings=(100, 120, 1000),
embedding_dim=(16, 64, 128, 256, 512, 1024, 2048),
tags=('long',)
)
embeddingbag_conversion_three_dim_configs = op_bench.cross_product_configs(
num_embeddings=(80,),
embedding_dim=(128, 256, 512),
batch_size=(10,),
tags=('short',)
)
conversion_ops = op_bench.op_list(
attrs=(
('qembeddingbag_byte_prepack', torch.ops.quantized.embedding_bag_byte_prepack),
('qembeddingbag_4bit_prepack', torch.ops.quantized.embedding_bag_4bit_prepack),
('qembeddingbag_2bit_prepack', torch.ops.quantized.embedding_bag_2bit_prepack),
),
attr_names=('op_name', 'op_func'),
)
unpack_ops = op_bench.op_list(
attrs=(
('qembeddingbag_byte_unpack', torch.ops.quantized.embedding_bag_byte_unpack),
('qembeddingbag_4bit_unpack', torch.ops.quantized.embedding_bag_4bit_unpack),
('qembeddingbag_2bit_unpack', torch.ops.quantized.embedding_bag_2bit_unpack),
),
attr_names=('op_name', 'op_func'),
)
class EmbeddingBagFloatToFusedBase(op_bench.TorchBenchmarkBase):
def init(self, num_embeddings, embedding_dim, op_func):
self.inputs = {
"weight": torch.rand(num_embeddings, embedding_dim, dtype=torch.float) + 1
}
self.op_func = op_func
def forward(self, weight):
return self.op_func(weight)
class EmbeddingBagThreeDimFloatToFusedBase(op_bench.TorchBenchmarkBase):
def init(self, num_embeddings, embedding_dim, batch_size, op_func):
self.inputs = {
"weight": torch.rand(batch_size, num_embeddings, embedding_dim, dtype=torch.float) + 1
}
self.op_func = op_func
def forward(self, weight):
return self.op_func(weight)
class EmbeddingBagFusedToFloatBase(op_bench.TorchBenchmarkBase):
def init(self, num_embeddings, embedding_dim, op_func):
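# The byte-packed rowwise format appends the per-row float32 scale and zero_point (8 bytes) to each row, hence embedding_dim + 8 columns here.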
weight = torch.randn(num_embeddings, embedding_dim + 8, dtype=torch.float)
self.inputs = {
"packed_weight": weight.to(torch.uint8)
}
self.op_func = op_func
def forward(self, packed_weight):
return self.op_func(packed_weight)
class EmbeddingBagThreeDimFusedToFloatBase(op_bench.TorchBenchmarkBase):
def init(self, num_embeddings, embedding_dim, batch_size, op_func):
weight = torch.randn(batch_size, num_embeddings, embedding_dim + 8, dtype=torch.float)
self.inputs = {
"packed_weight": weight.to(torch.uint8)
}
self.op_func = op_func
def forward(self, packed_weight):
return self.op_func(packed_weight)
op_bench.generate_pt_tests_from_op_list(conversion_ops,
embeddingbag_conversion_short_configs + embeddingbag_conversion_long_configs,
EmbeddingBagFloatToFusedBase)
op_bench.generate_pt_tests_from_op_list(unpack_ops,
embeddingbag_conversion_short_configs + embeddingbag_conversion_long_configs,
EmbeddingBagFusedToFloatBase)
op_bench.generate_pt_tests_from_op_list(conversion_ops,
embeddingbag_conversion_three_dim_configs,
EmbeddingBagThreeDimFloatToFusedBase)
op_bench.generate_pt_tests_from_op_list(unpack_ops,
embeddingbag_conversion_three_dim_configs,
EmbeddingBagThreeDimFusedToFloatBase)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qembedding_pack_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for ClipRanges operator."""
torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")
# Configs for PT ClipRanges operator
clip_ranges_long_configs = op_bench.cross_product_configs(
LENGTH=range(1, 100),
M=[1],
N=[2],
MAX_LENGTH=range(1, 100),
device=['cpu', 'cuda'],
dtype=[torch.int32],
tags=["long"],
)
clip_ranges_short_configs = op_bench.config_list(
attrs=[
[6, 1, 2, 1, torch.int32],
[7, 1, 2, 2, torch.int32],
[8, 1, 2, 3, torch.int32],
[9, 1, 2, 4, torch.int32],
[10, 1, 2, 5, torch.int32],
],
attr_names=["LENGTH", "M", "N", "MAX_LENGTH", "dtype"],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"],
)
class ClipRangesBenchmark(op_bench.TorchBenchmarkBase):
def init(self, LENGTH, M, N, MAX_LENGTH, device, dtype):
self.inputs = {
"input": torch.rand(LENGTH, M, N, device=device).type(dtype),
"max_length": MAX_LENGTH
}
self.set_module_name("clip_ranges")
def forward(self, input, max_length: int):
return torch.ops.fb.clip_ranges(input, max_length)
op_bench.generate_pt_test(
clip_ranges_long_configs + clip_ranges_short_configs, ClipRangesBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/clip_ranges_test.py |
import operator_benchmark as op_bench
import torch
import numpy
"""Microbenchmarks for index_select operator."""
# An example input from this configuration is M=8, N=8, K=1, dim=1.
index_select_configs_short = op_bench.config_list(
attr_names=["M", "N", "K", "dim"],
attrs=[
[8, 8, 1, 1],
[256, 512, 1, 1],
[512, 512, 1, 1],
[8, 8, 2, 1],
[256, 512, 2, 1],
[512, 512, 2, 1],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"]
)
index_select_configs_long = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
K=[1, 2],
dim=[1],
device=['cpu', 'cuda'],
tags=["long"]
)
class IndexSelectBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, dim, device):
max_val = N
numpy.random.seed((1 << 32) - 1)
index_dim = numpy.random.randint(0, N)
self.inputs = {
"input_one": torch.rand(M, N, K, device=device),
"dim" : dim,
"index" : torch.tensor(numpy.random.randint(0, max_val, index_dim), device=device),
}
self.set_module_name("index_select")
def forward(self, input_one, dim, index):
return torch.index_select(input_one, dim, index)
op_bench.generate_pt_test(index_select_configs_short + index_select_configs_long,
IndexSelectBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/index_select_test.py |
import operator_benchmark as op_bench
import torch
import numpy as np
from typing import Optional
from torch.testing._internal.common_quantization import (
lengths_to_offsets
)
torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")
embedding_bag_rowwise_offsets_short_configs = op_bench.cross_product_configs(
num_embeddings=(80,),
embedding_dim=(128, 256),
num_offsets=range(2, 10),
enable_per_sample_weights=(True, False),
include_last_offset=(True, False),
is_pruned_weights=(True, False,),
use_32bit_indices=(True, False),
use_32bit_offsets=(True, False),
tags=['short'],
)
embedding_bag_rowwise_offsets_long_configs = op_bench.cross_product_configs(
num_embeddings=(100, 120, 1000, 10_000, 20_000),
embedding_dim=(16, 64, 128, 256),
num_offsets=range(10, 20),
enable_per_sample_weights=(True, False),
include_last_offset=(True, False),
is_pruned_weights=(True, False,),
use_32bit_indices=(True, False),
use_32bit_offsets=(True, False),
tags=['long']
)
full_configs = embedding_bag_rowwise_offsets_short_configs + embedding_bag_rowwise_offsets_long_configs
four_bit_rowwise_ops = op_bench.op_list(
attrs=(
('qembeddingbag_4bit_rowwise_offsets', torch.ops.quantized.embedding_bag_4bit_rowwise_offsets),
),
attr_names=('op_name', 'op_func'),
)
byte_rowwise_ops = op_bench.op_list(
attrs=(
('qembeddingbag_byte_rowwise_offsets', torch.ops.quantized.embedding_bag_byte_rowwise_offsets),
),
attr_names=('op_name', 'op_func'),
)
def get_pruned_weights_and_mapping(q_weights):
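# Prunes rows of the packed weights based on a random indicator with threshold 0.01 and returns the pruned weights plus the compressed row-index mapping used during lookup.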
indicator = torch.from_numpy(np.random.uniform(
low=-1.0, high=1.0, size=[q_weights.shape[0]]).astype(np.float32))
q_pruned_weights, compressed_indices_mapping = torch.ops.fb.embedding_bag_rowwise_prune(
q_weights, indicator, 0.01, torch.int32)
return q_pruned_weights, compressed_indices_mapping
class EmbedddingBag4BitRowwiseOffsetsTest(op_bench.TorchBenchmarkBase):
def init(self,
num_embeddings: int,
embedding_dim: int,
num_offsets: int,
enable_per_sample_weights: bool,
include_last_offset: bool,
is_pruned_weights: bool,
use_32bit_indices: bool,
use_32bit_offsets: bool,
op_func):
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.num_offsets = num_offsets
self.enable_per_sample_weights = enable_per_sample_weights
self.include_last_offset = include_last_offset
self.max_segment_length = 20
self.num_lengths = np.random.randint(1, num_offsets + 1)
self.lengths = np.random.randint(0, self.max_segment_length + 1,
size=self.num_lengths).astype(np.int32)
self.num_indices = np.sum(self.lengths)
self.is_pruned_weights = is_pruned_weights
self.use_32bit_indices = use_32bit_indices
self.use_32bit_offsets = use_32bit_offsets
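# lengths_to_offsets converts per-bag lengths into start offsets (an exclusive prefix sum over the lengths).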
self.offsets = lengths_to_offsets(self.lengths)
self.indices = torch.from_numpy(np.random.randint(
low=0, high=num_embeddings, size=self.num_indices, dtype=np.int64))
self.indices = self.indices.int() if self.use_32bit_indices else self.indices
self.offsets = self.offsets.int() if self.use_32bit_offsets else self.offsets
if self.include_last_offset:
self.offsets = torch.cat(
(self.offsets, torch.tensor([self.indices.size(0)], dtype=torch.long)), 0
)
self.weights = torch.from_numpy((np.random.random_sample((
self.num_embeddings, self.embedding_dim)) + 1).astype(np.float32))
self.prepack_func = torch.ops.quantized.embedding_bag_4bit_prepack
self.prepacked_weights = self.prepack_func(self.weights)
self.per_sample_weights = torch.from_numpy(np.random.uniform(
low=0.01, high=0.5, size=[len(self.indices)]).astype(np.float32)) if \
self.enable_per_sample_weights else None
self.compressed_indices = None
if self.is_pruned_weights:
self.prepacked_weights, self.compressed_indices = get_pruned_weights_and_mapping(self.prepacked_weights)
self.inputs = {
"prepacked_weights": self.prepacked_weights,
"indices": self.indices,
"offsets": self.offsets,
"mode": 0,
"per_sample_weights": self.per_sample_weights,
"include_last_offset": self.include_last_offset,
"is_pruned_weights": self.is_pruned_weights,
"compressed_indices": self.compressed_indices
}
self.op_func = op_func
def forward(
self,
prepacked_weights,
indices,
offsets,
mode: int,
per_sample_weights: Optional[torch.Tensor],
include_last_offset: bool,
is_pruned_weights: bool,
compressed_indices: Optional[torch.Tensor]
):
return self.op_func(prepacked_weights, indices, offsets,
mode=mode,
per_sample_weights=per_sample_weights,
include_last_offset=include_last_offset,
pruned_weights=is_pruned_weights,
compressed_indices_mapping=compressed_indices)
class EmbedddingBagByteRowwiseOffsetsTest(op_bench.TorchBenchmarkBase):
def init(self,
num_embeddings: int,
embedding_dim: int,
num_offsets: int,
enable_per_sample_weights: bool,
include_last_offset: bool,
is_pruned_weights: bool,
use_32bit_indices: bool,
use_32bit_offsets: bool,
op_func):
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.num_offsets = num_offsets
self.enable_per_sample_weights = enable_per_sample_weights
self.include_last_offset = include_last_offset
self.max_segment_length = 20
self.num_lengths = np.random.randint(1, num_offsets + 1)
self.lengths = np.random.randint(0, self.max_segment_length + 1,
size=self.num_lengths).astype(np.int32)
self.is_pruned_weights = is_pruned_weights
self.use_32bit_indices = use_32bit_indices
self.use_32bit_offsets = use_32bit_offsets
self.num_indices = np.sum(self.lengths)
self.offsets = lengths_to_offsets(self.lengths)
self.indices = torch.from_numpy(np.random.randint(
low=0, high=num_embeddings, size=self.num_indices, dtype=np.int64))
self.indices = self.indices.int() if self.use_32bit_indices else self.indices
self.offsets = self.offsets.int() if self.use_32bit_offsets else self.offsets
if include_last_offset:
self.offsets = torch.cat(
(self.offsets, torch.tensor([self.indices.size(0)], dtype=torch.long)), 0
)
self.weights = torch.from_numpy((np.random.random_sample((
self.num_embeddings, self.embedding_dim)) + 1).astype(np.float32))
self.prepack_func = torch.ops.quantized.embedding_bag_byte_prepack
self.prepacked_weights = self.prepack_func(self.weights)
self.per_sample_weights = torch.from_numpy(np.random.uniform(
low=0.01, high=0.5, size=[len(self.indices)]).astype(np.float32)) if \
self.enable_per_sample_weights else None
self.compressed_indices = None
if self.is_pruned_weights:
self.prepacked_weights, self.compressed_indices = get_pruned_weights_and_mapping(self.prepacked_weights)
self.inputs = {
"prepacked_weights": self.prepacked_weights,
"indices": self.indices,
"offsets": self.offsets,
"mode": 0,
"per_sample_weights": self.per_sample_weights,
"include_last_offset": self.include_last_offset,
"is_pruned_weights": self.is_pruned_weights,
"compressed_indices": self.compressed_indices
}
self.op_func = op_func
def forward(
self,
prepacked_weights,
indices,
offsets,
mode: int,
per_sample_weights: Optional[torch.Tensor],
include_last_offset: bool,
is_pruned_weights: bool,
compressed_indices: Optional[torch.Tensor]
):
return self.op_func(prepacked_weights, indices, offsets,
mode=mode,
per_sample_weights=per_sample_weights,
include_last_offset=include_last_offset,
pruned_weights=is_pruned_weights,
compressed_indices_mapping=compressed_indices)
op_bench.generate_pt_tests_from_op_list(four_bit_rowwise_ops,
full_configs,
EmbedddingBag4BitRowwiseOffsetsTest)
op_bench.generate_pt_tests_from_op_list(byte_rowwise_ops,
full_configs,
EmbedddingBagByteRowwiseOffsetsTest)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qembedding_bag_lookups_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for MatMul operator"""
# Configs for PT Matmul operator
mm_short_configs = op_bench.config_list(
attr_names=["M", "N", "K", "trans_a", "trans_b"],
attrs=[
[1, 1, 1, True, False],
[128, 128, 128, True, False],
[256, 256, 256, False, True],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"],
)
mm_long_configs = op_bench.cross_product_configs(
M=[32],
N=[512, 128],
K=[64],
trans_a=[False, True],
trans_b=[True, False],
device=['cpu', 'cuda'],
tags=["long"]
)
class MatMulBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, trans_a, trans_b, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device)
if trans_a
else torch.rand(N, M, device=device).t(),
"input_two": torch.rand(N, K, device=device)
if trans_b
else torch.rand(K, N, device=device).t(),
}
self.set_module_name("matmul")
def forward(self, input_one, input_two):
return torch.matmul(input_one, input_two)
op_bench.generate_pt_test(mm_long_configs + mm_short_configs, MatMulBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/matmul_test.py |
import torch
import operator_benchmark as op_bench
qcomparators_configs = op_bench.cross_product_configs(
N=(8, 64),
dtype=(torch.quint8, torch.qint8, torch.qint32),
contig=(False, True),
other_scalar=(False, True),
out_variant=(False, True),
tags=('short',)
)
qcomparators_ops = op_bench.op_list(
attrs=(
('eq', torch.eq),
('ne', torch.ne),
('lt', torch.lt),
('gt', torch.gt),
('le', torch.le),
('ge', torch.ge),
),
attr_names=('op_name', 'op_func'),
)
class QComparatorBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, dtype, contig, other_scalar, out_variant, op_func):
# TODO: Consider more diverse shapes
f_input = (torch.rand(N, N) - 0.5) * 256
scale = 1.0
zero_point = 0
q_input_a = torch.quantize_per_tensor(f_input, scale=scale,
zero_point=zero_point,
dtype=dtype)
q_input_b = q_input_a.clone()
if not contig:
permute_dims = list(range(f_input.ndim))[::-1]
q_input_a = q_input_a.permute(permute_dims)
self.qop = op_func
self.inputs = {
"q_input_a": q_input_a,
"q_input_b": q_input_b,
"out_variant": out_variant,
"other_scalar": other_scalar,
}
def forward(self, q_input_a, q_input_b, out_variant: bool, other_scalar: bool):
if out_variant:
if other_scalar:
return self.qop(q_input_a, 42, out=torch.tensor(True, dtype=torch.bool))
else:
return self.qop(q_input_a, q_input_b, out=torch.tensor(True, dtype=torch.bool))
else:
if other_scalar:
return self.qop(q_input_a, 42)
else:
return self.qop(q_input_a, q_input_b)
op_bench.generate_pt_tests_from_op_list(qcomparators_ops,
qcomparators_configs,
QComparatorBenchmark)
if __name__ == '__main__':
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qcomparators_test.py |
import torch
import operator_benchmark as op_bench
# 2D pooling will have input matrix of rank 3 or 4
qpool2d_long_configs = op_bench.config_list(
attrs=(
# C H W k s p
( 1, 3, 3, (3, 3), (1, 1), (0, 0)), # dummy # noqa: E201,E241
( 3, 64, 64, (3, 3), (2, 2), (1, 1)), # dummy # noqa: E201,E241
# VGG16 pools with original input shape: (-1, 3, 224, 224)
( 64, 224, 224, (2, 2), (2, 2), (0, 0)), # MaxPool2d-4 # noqa: E201
(256, 56, 56, (2, 2), (2, 2), (0, 0)), # MaxPool2d-16 # noqa: E241
),
attr_names=('C', 'H', 'W', # Input layout
'k', 's', 'p'), # Pooling parameters
cross_product_configs={
'N': (1, 4),
'contig': (False, True),
'dtype': (torch.quint8,),
},
tags=('long',)
)
qpool2d_short_configs = op_bench.config_list(
attrs=((1, 3, 3, (3, 3), (1, 1), (0, 0)),), # dummy
attr_names=('C', 'H', 'W', # Input layout
'k', 's', 'p'), # Pooling parameters
cross_product_configs={
'N': (2,),
'contig': (True,),
'dtype': (torch.qint32, torch.qint8, torch.quint8),
},
tags=('short',)
)
qadaptive_avgpool2d_long_configs = op_bench.cross_product_configs(
input_size=(
# VGG16 pools with original input shape: (-1, 3, 224, 224)
(112, 112), # MaxPool2d-9
),
output_size=(
(448, 448),
# VGG16 pools with original input shape: (-1, 3, 224, 224)
(224, 224), # MaxPool2d-4
(112, 112), # MaxPool2d-9
( 56, 56), # MaxPool2d-16 # noqa: E201,E241
( 14, 14), # MaxPool2d-30 # noqa: E201,E241
),
N=(1, 4),
C=(1, 3, 64, 128),
contig=(False, True),
dtype=(torch.quint8,),
tags=('long',)
)
qadaptive_avgpool2d_short_configs = op_bench.config_list(
attrs=((4, 3, (224, 224), (112, 112), True),),
attr_names=('N', 'C', 'input_size', 'output_size', 'contig'),
cross_product_configs={
'dtype': (torch.qint32, torch.qint8, torch.quint8),
},
tags=('short',)
)
class _QPool2dBenchmarkBase(op_bench.TorchBenchmarkBase):
def setup(self, N, C, H, W, dtype, contig):
# Input
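# N == 0 benchmarks the unbatched (C, H, W) input path; any other N builds a batched NCHW tensor.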
if N == 0:
f_input = (torch.rand(C, H, W) - 0.5) * 256
else:
f_input = (torch.rand(N, C, H, W) - 0.5) * 256
scale = 1.0
zero_point = 0
# Quantize the tensor
self.q_input = torch.quantize_per_tensor(f_input, scale=scale,
zero_point=zero_point,
dtype=dtype)
if not contig:
# Permute into NHWC and back to make it non-contiguous
if N == 0:
self.q_input = self.q_input.permute(1, 2, 0).contiguous()
self.q_input = self.q_input.permute(2, 0, 1)
else:
self.q_input = self.q_input.permute(0, 2, 3, 1).contiguous()
self.q_input = self.q_input.permute(0, 3, 1, 2)
self.inputs = {
"q_input": self.q_input
}
def forward(self, q_input):
return self.pool_op(q_input)
class QMaxPool2dBenchmark(_QPool2dBenchmarkBase):
def init(self, N, C, H, W, k, s, p, contig, dtype):
self.pool_op = torch.nn.MaxPool2d(kernel_size=k, stride=s, padding=p,
dilation=(1, 1), ceil_mode=False,
return_indices=False)
super(QMaxPool2dBenchmark, self).setup(N, C, H, W, dtype, contig)
class QAvgPool2dBenchmark(_QPool2dBenchmarkBase):
def init(self, N, C, H, W, k, s, p, contig, dtype):
self.pool_op = torch.nn.AvgPool2d(kernel_size=k, stride=s, padding=p,
ceil_mode=False)
super(QAvgPool2dBenchmark, self).setup(N, C, H, W, dtype, contig)
class QAdaptiveAvgPool2dBenchmark(_QPool2dBenchmarkBase):
def init(self, N, C, input_size, output_size, contig, dtype):
self.pool_op = torch.nn.AdaptiveAvgPool2d(output_size=output_size)
super(QAdaptiveAvgPool2dBenchmark, self).setup(N, C, *input_size,
dtype=dtype,
contig=contig)
op_bench.generate_pt_test(qadaptive_avgpool2d_short_configs + qadaptive_avgpool2d_long_configs,
QAdaptiveAvgPool2dBenchmark)
op_bench.generate_pt_test(qpool2d_short_configs + qpool2d_long_configs,
QAvgPool2dBenchmark)
op_bench.generate_pt_test(qpool2d_short_configs + qpool2d_long_configs,
QMaxPool2dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qpool_test.py |
import operator_benchmark as op_bench
import torch
import numpy
"""Microbenchmarks for gather operator."""
# An example input from this configuration is M=256, N=512, dim=0.
gather_configs_short = op_bench.config_list(
attr_names=["M", "N", "dim"],
attrs=[
[256, 512, 0],
[512, 512, 1],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"]
)
gather_configs_long = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
dim=[0, 1],
device=['cpu', 'cuda'],
tags=["long"]
)
class GatherBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, dim, device):
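# Gather indices must be non-negative and smaller than the input's size along `dim`.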
min_val = M if dim == 0 else N
numpy.random.seed((1 << 32) - 1)
self.inputs = {
"input_one": torch.rand(M, N, device=device),
"dim": dim,
"index": torch.tensor(numpy.random.randint(0, min_val, (M, N)), device=device)
}
self.set_module_name("gather")
def forward(self, input_one, dim: int, index):
return torch.gather(input_one, dim, index)
op_bench.generate_pt_test(gather_configs_short + gather_configs_long,
GatherBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/gather_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for linear_unpack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_unpack_fp16 operator
linear_unpack_fp16_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
K=[256, 512],
device=['cpu'],
tags=["long"]
)
linear_unpack_fp16_short_configs = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 1, 1],
[64, 64, 64],
[64, 64, 128],
],
cross_product_configs={
'device': ['cpu'],
},
tags=["short"],
)
class LinearUnpackFP16Benchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
# input to unpack operator must be what the output is for prepack operator
self.inputs = {
"input_one": torch.ops.quantized.linear_prepack_fp16(torch.rand(M, N, K, device=device,
requires_grad=False,
dtype=torch.float32))
}
self.set_module_name("linear_unpack_fp16")
def forward(self, input_one):
return torch.ops.quantized.linear_unpack_fp16(input_one)
# The generated test names based on linear_unpack_fp16_short_configs will be in the following pattern:
# linear_unpack_fp16_M1_N1_K1_devicecpu
op_bench.generate_pt_test(linear_unpack_fp16_long_configs + linear_unpack_fp16_short_configs, LinearUnpackFP16Benchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/linear_unpack_fp16_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
import numpy
from pt import configs
"""
Microbenchmarks for qEmbeddingBag operators.
"""
class QEmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
self.embedding = nnq.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
include_last_offset=include_last_offset).to(device=device)
numpy.random.seed((1 << 32) - 1)
self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()
offset = torch.tensor([offset], dtype=torch.long, device=device)
self.offset = torch.cat((offset, torch.tensor([self.input.size(0)], dtype=torch.long, device=device)), 0)
self.inputs = {
"input": self.input,
"offset": self.offset
}
self.set_module_name('qEmbeddingBag')
def forward(self, input, offset):
return self.embedding(input, offset)
op_bench.generate_pt_test(configs.embeddingbag_short_configs, QEmbeddingBagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/qembeddingbag_test.py |
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for linear_prepack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_prepack_fp16 operator
linear_prepack_fp16_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
K=[256, 512],
device=['cpu'],
tags=["long"]
)
linear_prepack_fp16_short_configs = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 1, 1],
[64, 64, 64],
[64, 64, 128],
],
cross_product_configs={
'device': ['cpu'],
},
tags=["short"],
)
class LinearPrepackFP16Benchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.inputs = {
"input_one": torch.rand(M, N, K, device=device, requires_grad=False, dtype=torch.float32)
}
self.set_module_name("linear_prepack_fp16")
def forward(self, input_one):
return torch.ops.quantized.linear_prepack_fp16(input_one)
# The generated test names based on linear_prepack_fp16_short_configs will be in the following pattern:
# linear_prepack_fp16_M1_N1_K1_devicecpu
op_bench.generate_pt_test(linear_prepack_fp16_long_configs + linear_prepack_fp16_short_configs, LinearPrepackFP16Benchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/linear_prepack_fp16_test.py |
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for MaxPool1d and AvgPool1d operators.
"""
# Configs for pool-1d ops
pool_1d_configs_short = op_bench.config_list(
attr_names=[
'kernel', 'stride', 'N', 'C', 'L'
],
attrs=[
[3, 1, 8, 256, 256],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=['short']
)
pool_1d_configs_long = op_bench.cross_product_configs(
kernel=[3],
stride=[1, 2],
N=[8, 16],
C=[3],
L=[128, 256],
device=['cpu', 'cuda'],
tags=['long']
)
pool_1d_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['MaxPool1d', nn.MaxPool1d],
['AvgPool1d', nn.AvgPool1d],
],
)
class Pool1dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, kernel, stride, N, C, L, device, op_func):
self.inputs = {
"input": torch.rand(N, C, L, device=device)
}
self.op_func = op_func(kernel, stride=stride)
def forward(self, input):
return self.op_func(input)
op_bench.generate_pt_tests_from_op_list(pool_1d_ops_list,
pool_1d_configs_short + pool_1d_configs_long,
Pool1dBenchmark)
"""
Microbenchmarks for MaxPool2d and AvgPool2d operators.
"""
# Configs for pool-2d ops
pool_2d_configs_short = op_bench.config_list(
attr_names=[
'kernel', 'stride', 'N', 'C', 'H', 'W'
],
attrs=[
[[3, 1], [2, 1], 1, 16, 32, 32],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=['short']
)
pool_2d_configs_long = op_bench.cross_product_configs(
kernel=[[3, 2], [3, 3]],
stride=[[2, 2]],
N=[8, 16],
C=[32],
H=[32, 64],
W=[32, 64],
device=['cpu', 'cuda'],
tags=['long']
)
pool_2d_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['MaxPool2d', nn.MaxPool2d],
['AvgPool2d', nn.AvgPool2d],
['AdaptiveMaxPool2d', lambda kernel, stride: nn.AdaptiveMaxPool2d(kernel)],
['FractionalMaxPool2d', lambda kernel, stride: nn.FractionalMaxPool2d(kernel, output_size=2)],
],
)
class Pool2dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, kernel, stride, N, C, H, W, device, op_func):
self.inputs = {
"input": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func(kernel, stride=stride)
def forward(self, input):
return self.op_func(input)
op_bench.generate_pt_tests_from_op_list(pool_2d_ops_list,
pool_2d_configs_short + pool_2d_configs_long,
Pool2dBenchmark)
"""
Microbenchmarks for MaxPool3d and AvgPool3d operators.
"""
# Configs for pool-3d ops
pool_3d_configs_short = op_bench.config_list(
attr_names=[
'kernel', 'stride', 'N', 'C', 'D', 'H', 'W'
],
attrs=[
[[3, 1, 3], [2, 1, 2], 1, 16, 16, 32, 32],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=['short']
)
pool_3d_configs_long = op_bench.cross_product_configs(
kernel=[[3, 2, 3], [3, 3, 3]],
stride=[[2, 2, 2]],
N=[8, 16],
C=[32],
D=[32],
H=[32, 64],
W=[32, 64],
device=['cpu', 'cuda'],
tags=['long']
)
pool_3d_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['MaxPool3d', nn.MaxPool3d],
['AvgPool3d', nn.AvgPool3d],
['AdaptiveMaxPool3d', lambda kernel, stride: nn.AdaptiveMaxPool3d(kernel)],
['FractionalMaxPool3d', lambda kernel, stride: nn.FractionalMaxPool3d(kernel, output_size=2)],
],
)
class Pool3dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, kernel, stride, N, C, D, H, W, device, op_func):
self.inputs = {
"input": torch.rand(N, C, D, H, W, device=device)
}
self.op_func = op_func(kernel, stride=stride)
def forward(self, input):
return self.op_func(input)
op_bench.generate_pt_tests_from_op_list(pool_3d_ops_list,
pool_3d_configs_short + pool_3d_configs_long,
Pool3dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/pt/pool_test.py |
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
import random
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for Concat operator. Supports both Caffe2/PyTorch."""
cross_product_configs = {
'device': ['cpu', 'cuda'],
'dtype': ['float'],
'add_axis': [0],
}
# Configs for C2 concat operator
cat_configs_short = op_bench.config_list(
attr_names=['sizes', 'N', 'axis'],
attrs=[
[(1, 1, 1), 2, 0], # noqa: E241
[(512, 512, 2), 2, 1], # noqa: E241
[(128, 1024, 2), 2, 1], # noqa: E241
],
cross_product_configs=cross_product_configs,
tags=['short'],
)
# Configs specific to the static runtime feature - a fast runtime for pared-down models
cat_configs_static_runtime = op_bench.config_list(
attr_names=['sizes', 'N', 'axis', 'add_axis'],
attrs=[
[(1, 40), 5, 1, 1],
[[(1, 160), (1, 14)], -1, 1, 0],
[[(1, 20, 40), (1, 4, 40), (1, 5, 40)], -1, 1, 0],
[[(1, 580), (1, 174)], -1, 1, 0],
[(20, 40), 5, 1, 1],
[[(20, 160), (20, 14)], -1, 1, 0],
[[(20, 20, 40), (20, 4, 40), (20, 5, 40)], -1, 1, 0],
[[(20, 580), (20, 174)], -1, 1, 0],
],
cross_product_configs=cross_product_configs,
tags=['static_runtime'],
)
cat_configs_long = op_bench.config_list(
attr_names=['sizes', 'N', 'axis'],
attrs=[
[(2**10, 2**10, 2), 2, 0], # noqa: E241
[(2**10+1, 2**10-1, 2), 2, 1], # noqa: E226,E241
[(2**10, 2**10, 2), 2, 2], # noqa: E241
[[ lambda: random.randint(2**6, 2**7), 2**7-17, 2**6+1], # noqa: E201,E226,E241
5, 0],
[[ 2**6+2**5, lambda: random.randint(2**6, 2**7), 2**6], # noqa: E201,E226,E241,E272
5, 1],
[[ 2**7, 2**6, lambda: random.randint(2**6, 2**7)], # noqa: E201,E241,E272
5, 2],
[[lambda: random.randint(2**5, 2**6), 2**5, 2**6], # noqa: E241
50, 0],
[[2**5, lambda: random.randint(2**5, 2**6), 2**6], # noqa: E241,E272
50, 1],
[[2**5+1, 2**6+1, lambda: random.randint(2**5, 2**6)], # noqa: E226,E241,E272
50, 2],
],
cross_product_configs=cross_product_configs,
tags=['long'],
)
# There is a different codepath on CUDA for >4 dimensions
cat_configs_multidim = op_bench.config_list(
attr_names=['sizes', 'N', 'axis'],
attrs=[
[(2**6, 2**5, 2**2, 2**4, 2**5), 2, 2], # noqa: E241
[(2**4, 2**5, 2**2, 2**4, 2**5), 8, 2], # noqa: E241
[(2**3+1, 2**5-1, 2**2+1, 2**4-1, 2**5+1), 17, 4], # noqa: E226,E241
],
cross_product_configs=cross_product_configs,
tags=['multidim'],
)
cat_configs_manyinputs = op_bench.config_list(
attr_names=['sizes', 'N', 'axis'],
attrs=[
[[lambda: random.randint(1, 10000)], 100, 0],
[[lambda: random.randint(1, 1000)], 1000, 0],
[[lambda: random.randint(1, 500)], 2000, 0],
[[lambda: random.randint(1, 300)], 3000, 0],
],
cross_product_configs=cross_product_configs,
tags=['manyinputs'],
)
class ConcatBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, sizes, N, axis, add_axis, dtype, device):
random.seed(42)
self.inputs = []
self.args = {'axis': axis, 'add_axis': add_axis}
gen_sizes = []
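# N == -1 means `sizes` already lists one explicit shape per input tensor; otherwise the single `sizes` template is replicated N times, resolving any callable dims to random values.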
if isinstance(sizes, list) and N == -1:
gen_sizes = sizes
else:
for i in range(N):
gen_sizes.append([old_size() if callable(old_size) else old_size for old_size in sizes])
for s in gen_sizes:
self.inputs.append(self.tensor(s, dtype, device=device))
self.output = self.tensor(gen_sizes[0], dtype, device=device)
self.split_info = self.tensor(gen_sizes[0], "int")
self.set_module_name("concat")
def forward(self):
op = core.CreateOperator(
"Concat", self.inputs, [self.output, self.split_info], **self.args
)
return op
op_bench_c2.generate_c2_test(cat_configs_short +
cat_configs_long +
cat_configs_multidim +
cat_configs_manyinputs +
cat_configs_static_runtime,
ConcatBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| pytorch-master | benchmarks/operator_benchmark/c2/concat_test.py |