| python_code (string, 0-1.02M chars) | repo_name (string, 9-48 chars) | file_path (string, 5-114 chars) |
|---|---|---|
import ast
import sys
from typing import List, Optional, Tuple
from ._importlib import _resolve_name
class _ExtractModuleReferences(ast.NodeVisitor):
"""
Extract the list of global variables a block of code will read and write
"""
@classmethod
def run(cls, src: str, package: str) -> List[Tuple[str, Optional[str]]]:
visitor = cls(package)
tree = ast.parse(src)
visitor.visit(tree)
return list(visitor.references.keys())
def __init__(self, package):
super().__init__()
self.package = package
self.references = {}
def _absmodule(self, module_name: str, level: int) -> str:
if level > 0:
return _resolve_name(module_name, self.package, level)
return module_name
def visit_Import(self, node):
for alias in node.names:
self.references[(alias.name, None)] = True
def visit_ImportFrom(self, node):
name = self._absmodule(node.module, 0 if node.level is None else node.level)
for alias in node.names:
# from my_package import foo
# foo may be a module, so we have to add it to the list of
            # potential references; if importing it fails, we will ignore it
if alias.name != "*":
self.references[(name, alias.name)] = True
else:
self.references[(name, None)] = True
def _grab_node_int(self, node):
if sys.version_info[:2] < (3, 8):
return node.n
else:
return node.value
def _grab_node_str(self, node):
if sys.version_info[:2] < (3, 8):
return node.s
else:
return node.value
def visit_Call(self, node):
# __import__ calls aren't routed to the visit_Import/From nodes
if hasattr(node.func, "id") and node.func.id == "__import__":
try:
name = self._grab_node_str(node.args[0])
fromlist = []
level = 0
if len(node.args) > 3:
for v in node.args[3].elts:
fromlist.append(self._grab_node_str(v))
elif hasattr(node, "keywords"):
for keyword in node.keywords:
if keyword.arg == "fromlist":
for v in keyword.value.elts:
fromlist.append(self._grab_node_str(v))
if len(node.args) > 4:
level = self._grab_node_int(node.args[4])
elif hasattr(node, "keywords"):
for keyword in node.keywords:
if keyword.arg == "level":
level = self._grab_node_int(keyword.value)
if fromlist == []:
                    # When the fromlist argument is empty, the normal import system
                    # returns the top-level package (the name up to the first dot).
                    # We include the top-level package to match that behavior, and
                    # the last-level package to capture the user's intended dependency.
self.references[(name, None)] = True
top_name = name.rsplit(".", maxsplit=1)[0]
if top_name != name:
top_name = self._absmodule(top_name, level)
self.references[(top_name, None)] = True
else:
name = self._absmodule(name, level)
for alias in fromlist:
# fromlist args may be submodules, so we have to add the fromlist args
# to the list of potential references. If import of an arg fails we
# will ignore it, similar to visit_ImportFrom
if alias != "*":
self.references[(name, alias)] = True
else:
self.references[(name, None)] = True
            except Exception:
return
find_files_source_depends_on = _ExtractModuleReferences.run
| pytorch-master | torch/package/find_file_dependencies.py |
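As a rough usage sketch (assuming the internal module above is importable at its repository path), find_files_source_depends_on can be exercised on a small source snippet; the package name "my_pkg" below is hypothetical and only anchors the relative import:

from torch.package.find_file_dependencies import find_files_source_depends_on

src = """
import io
from os import path
from . import sibling
"""
# "my_pkg" is a made-up package name used to resolve the relative import.
refs = find_files_source_depends_on(src, "my_pkg")
# Expected entries: ("io", None), ("os", "path"), ("my_pkg", "sibling").
print(refs)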
"""isort:skip_file"""
from pickle import ( # type: ignore[attr-defined]
_compat_pickle,
_extension_registry,
_getattribute,
_Pickler,
EXT1,
EXT2,
EXT4,
GLOBAL,
Pickler,
PicklingError,
STACK_GLOBAL,
)
from struct import pack
from types import FunctionType
from .importer import Importer, ObjMismatchError, ObjNotFoundError, sys_importer
class PackagePickler(_Pickler):
"""Package-aware pickler.
This behaves the same as a normal pickler, except it uses an `Importer`
to find objects and modules to save.
"""
def __init__(self, importer: Importer, *args, **kwargs):
self.importer = importer
super().__init__(*args, **kwargs)
# Make sure the dispatch table copied from _Pickler is up-to-date.
        # Issues have previously been encountered where a library (e.g. dill)
        # mutates _Pickler.dispatch: PackagePickler copies the table when this
        # module is imported, then the offending library removes its dispatch
        # entries, leaving PackagePickler with a stale dispatch table that may
        # cause unwanted behavior.
self.dispatch = _Pickler.dispatch.copy() # type: ignore[misc]
self.dispatch[FunctionType] = PackagePickler.save_global # type: ignore[assignment]
def save_global(self, obj, name=None):
# unfortunately the pickler code is factored in a way that
# forces us to copy/paste this function. The only change is marked
# CHANGED below.
write = self.write # type: ignore[attr-defined]
memo = self.memo # type: ignore[attr-defined]
# CHANGED: import module from module environment instead of __import__
try:
module_name, name = self.importer.get_name(obj, name)
except (ObjNotFoundError, ObjMismatchError) as err:
raise PicklingError(f"Can't pickle {obj}: {str(err)}") from None
module = self.importer.import_module(module_name)
_, parent = _getattribute(module, name)
# END CHANGED
if self.proto >= 2: # type: ignore[attr-defined]
code = _extension_registry.get((module_name, name))
if code:
assert code > 0
if code <= 0xFF:
write(EXT1 + pack("<B", code))
elif code <= 0xFFFF:
write(EXT2 + pack("<H", code))
else:
write(EXT4 + pack("<i", code))
return
lastname = name.rpartition(".")[2]
if parent is module:
name = lastname
# Non-ASCII identifiers are supported only with protocols >= 3.
if self.proto >= 4: # type: ignore[attr-defined]
self.save(module_name) # type: ignore[attr-defined]
self.save(name) # type: ignore[attr-defined]
write(STACK_GLOBAL)
elif parent is not module:
self.save_reduce(getattr, (parent, lastname)) # type: ignore[attr-defined]
elif self.proto >= 3: # type: ignore[attr-defined]
write(
GLOBAL
+ bytes(module_name, "utf-8")
+ b"\n"
+ bytes(name, "utf-8")
+ b"\n"
)
else:
if self.fix_imports: # type: ignore[attr-defined]
r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
if (module_name, name) in r_name_mapping:
module_name, name = r_name_mapping[(module_name, name)]
elif module_name in r_import_mapping:
module_name = r_import_mapping[module_name]
try:
write(
GLOBAL
+ bytes(module_name, "ascii")
+ b"\n"
+ bytes(name, "ascii")
+ b"\n"
)
except UnicodeEncodeError:
raise PicklingError(
"can't pickle global identifier '%s.%s' using "
"pickle protocol %i" % (module, name, self.proto) # type: ignore[attr-defined]
) from None
self.memoize(obj) # type: ignore[attr-defined]
def create_pickler(data_buf, importer, protocol=4):
if importer is sys_importer:
# if we are using the normal import library system, then
# we can use the C implementation of pickle which is faster
return Pickler(data_buf, protocol=protocol)
else:
return PackagePickler(importer, data_buf, protocol=protocol)
| pytorch-master | torch/package/_package_pickler.py |
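A minimal sketch of the dispatch above: with the default sys_importer, create_pickler hands back the stock C Pickler, so the result is ordinary pickle bytes; with any other Importer it returns the package-aware PackagePickler instead.

import io
import pickle
from torch.package._package_pickler import create_pickler
from torch.package.importer import sys_importer

buf = io.BytesIO()
pickler = create_pickler(buf, sys_importer, protocol=4)  # C Pickler on this path
pickler.dump({"answer": 42})
assert pickle.loads(buf.getvalue()) == {"answer": 42}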
import _warnings
import os.path
# note: implementations
# copied from cpython's import code
# _zip_searchorder defines how we search for a module in the Zip
# archive: we first search for a package __init__.py, then for a
# non-package .py entry. (CPython's zipimport additionally searches
# .pyc entries and replaces '/' with path_sep; this trimmed copy only
# needs the source entries below.)
_zip_searchorder = (
("/__init__.py", True),
(".py", False),
)
# Replace any occurrences of '\r\n?' in the input string with '\n'.
# This converts DOS and Mac line endings to Unix line endings.
def _normalize_line_endings(source):
source = source.replace(b"\r\n", b"\n")
source = source.replace(b"\r", b"\n")
return source
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit(".", level - 1)
if len(bits) < level:
raise ValueError("attempted relative import beyond top-level package")
base = bits[0]
return "{}.{}".format(base, name) if name else base
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError("module name must be str, not {}".format(type(name)))
if level < 0:
raise ValueError("level must be >= 0")
if level > 0:
if not isinstance(package, str):
raise TypeError("__package__ not set to a string")
elif not package:
raise ImportError(
"attempted relative import with no known parent " "package"
)
if not name and level == 0:
raise ValueError("Empty module name")
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get("__package__")
spec = globals.get("__spec__")
if package is not None:
if spec is not None and package != spec.parent:
_warnings.warn(
"__package__ != __spec__.parent " f"({package!r} != {spec.parent!r})",
ImportWarning,
stacklevel=3,
)
return package
elif spec is not None:
return spec.parent
else:
_warnings.warn(
"can't resolve package from __spec__ or __package__, "
"falling back on __name__ and __path__",
ImportWarning,
stacklevel=3,
)
package = globals["__name__"]
if "__path__" not in globals:
package = package.rpartition(".")[0]
return package
def _normalize_path(path):
"""Normalize a path by ensuring it is a string.
If the resulting string contains path separators, an exception is raised.
"""
parent, file_name = os.path.split(path)
if parent:
raise ValueError("{!r} must be only a file name".format(path))
else:
return file_name
| pytorch-master | torch/package/_importlib.py |
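The relative-import resolution above is easiest to see with a couple of concrete inputs; this is a small self-contained sketch of _resolve_name's behavior:

from torch.package._importlib import _resolve_name

# level=1 keeps the whole package as the base: "from . import c" inside a.b
assert _resolve_name("c", "a.b", 1) == "a.b.c"
# level=2 drops one trailing component from the package first
assert _resolve_name("c", "a.b", 2) == "a.c"
# an empty name resolves to the base package itself ("from .. import x" style)
assert _resolve_name("", "a.b", 2) == "a"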
import os.path
from glob import glob
from typing import cast
import torch
from torch.types import Storage
# because get_storage_from_record returns a tensor!?
class _HasStorage(object):
def __init__(self, storage):
self._storage = storage
def storage(self):
return self._storage
class DirectoryReader(object):
"""
Class to allow PackageImporter to operate on unzipped packages. Methods
copy the behavior of the internal PyTorchFileReader class (which is used for
accessing packages in all other cases).
    N.B.: ScriptObjects cannot be unpickled or accessed via this DirectoryReader
    class because ScriptObjects require an actual PyTorchFileReader instance.
"""
def __init__(self, directory):
self.directory = directory
def get_record(self, name):
filename = f"{self.directory}/{name}"
with open(filename, "rb") as f:
return f.read()
def get_storage_from_record(self, name, numel, dtype):
filename = f"{self.directory}/{name}"
nbytes = torch._utils._element_size(dtype) * numel
storage = cast(Storage, torch.UntypedStorage)
return _HasStorage(storage.from_file(filename=filename, nbytes=nbytes))
def has_record(self, path):
full_path = os.path.join(self.directory, path)
return os.path.isfile(full_path)
def get_all_records(
self,
):
files = []
for filename in glob(f"{self.directory}/**", recursive=True):
if not os.path.isdir(filename):
files.append(filename[len(self.directory) + 1 :])
return files
| pytorch-master | torch/package/_directory_reader.py |
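A hedged sketch of how DirectoryReader is meant to be used; "unzipped_pkg" and "some/record.txt" are hypothetical paths standing in for an unzipped torch.package archive:

from torch.package._directory_reader import DirectoryReader

reader = DirectoryReader("unzipped_pkg")        # hypothetical directory
if reader.has_record("some/record.txt"):        # hypothetical record name
    raw_bytes = reader.get_record("some/record.txt")
all_files = reader.get_all_records()            # paths relative to the directory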
from collections import deque
from typing import List, Set
class DiGraph:
"""Really simple unweighted directed graph data structure to track dependencies.
The API is pretty much the same as networkx so if you add something just
copy their API.
"""
def __init__(self):
# Dict of node -> dict of arbitrary attributes
self._node = {}
# Nested dict of node -> successor node -> nothing.
# (didn't implement edge data)
self._succ = {}
# Nested dict of node -> predecessor node -> nothing.
self._pred = {}
# Keep track of the order in which nodes are added to
# the graph.
self._node_order = {}
self._insertion_idx = 0
def add_node(self, n, **kwargs):
"""Add a node to the graph.
Args:
            n: the node. Can be any object that is a valid dict key.
**kwargs: any attributes you want to attach to the node.
"""
if n not in self._node:
self._node[n] = kwargs
self._succ[n] = {}
self._pred[n] = {}
self._node_order[n] = self._insertion_idx
self._insertion_idx += 1
else:
self._node[n].update(kwargs)
def add_edge(self, u, v):
"""Add an edge to graph between nodes ``u`` and ``v``
``u`` and ``v`` will be created if they do not already exist.
"""
# add nodes
self.add_node(u)
self.add_node(v)
# add the edge
self._succ[u][v] = True
self._pred[v][u] = True
def successors(self, n):
"""Returns an iterator over successor nodes of n."""
try:
return iter(self._succ[n])
except KeyError as e:
raise ValueError(f"The node {n} is not in the digraph.") from e
def predecessors(self, n):
"""Returns an iterator over predecessors nodes of n."""
try:
return iter(self._pred[n])
except KeyError as e:
raise ValueError(f"The node {n} is not in the digraph.") from e
@property
def edges(self):
"""Returns an iterator over all edges (u, v) in the graph"""
for n, successors in self._succ.items():
for succ in successors:
yield n, succ
@property
def nodes(self):
"""Returns a dictionary of all nodes to their attributes."""
return self._node
def __iter__(self):
"""Iterate over the nodes."""
return iter(self._node)
def __contains__(self, n):
"""Returns True if ``n`` is a node in the graph, False otherwise."""
try:
return n in self._node
except TypeError:
return False
def forward_transitive_closure(self, src: str) -> Set[str]:
"""Returns a set of nodes that are reachable from src"""
        result = {src}
        working_set = deque([src])
while len(working_set) > 0:
cur = working_set.popleft()
for n in self.successors(cur):
if n not in result:
result.add(n)
working_set.append(n)
return result
def backward_transitive_closure(self, src: str) -> Set[str]:
"""Returns a set of nodes that are reachable from src in reverse direction"""
        result = {src}
        working_set = deque([src])
while len(working_set) > 0:
cur = working_set.popleft()
for n in self.predecessors(cur):
if n not in result:
result.add(n)
working_set.append(n)
return result
def all_paths(self, src: str, dst: str):
"""Returns a subgraph rooted at src that shows all the paths to dst."""
result_graph = DiGraph()
# First compute forward transitive closure of src (all things reachable from src).
forward_reachable_from_src = self.forward_transitive_closure(src)
if dst not in forward_reachable_from_src:
            return result_graph.to_dot()
# Second walk the reverse dependencies of dst, adding each node to
# the output graph iff it is also present in forward_reachable_from_src.
# we don't use backward_transitive_closures for optimization purposes
        working_set = deque([dst])
while len(working_set) > 0:
cur = working_set.popleft()
for n in self.predecessors(cur):
if n in forward_reachable_from_src:
result_graph.add_edge(n, cur)
# only explore further if its reachable from src
working_set.append(n)
return result_graph.to_dot()
def first_path(self, dst: str) -> List[str]:
"""Returns a list of nodes that show the first path that resulted in dst being added to the graph."""
path = []
while dst:
path.append(dst)
candidates = self._pred[dst].keys()
dst, min_idx = "", None
for candidate in candidates:
idx = self._node_order.get(candidate, None)
if idx is None:
break
if min_idx is None or idx < min_idx:
min_idx = idx
dst = candidate
return list(reversed(path))
def to_dot(self) -> str:
"""Returns the dot representation of the graph.
Returns:
A dot representation of the graph.
"""
edges = "\n".join(f'"{f}" -> "{t}";' for f, t in self.edges)
return f"""\
digraph G {{
rankdir = LR;
node [shape=box];
{edges}
}}
"""
| pytorch-master | torch/package/_digraph.py |
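A short usage sketch for the DiGraph above: build a three-node dependency graph, then query reachability, provenance, and the dot output.

from torch.package._digraph import DiGraph

g = DiGraph()
g.add_edge("a", "b")
g.add_edge("b", "c")
g.add_edge("a", "c")

print(g.forward_transitive_closure("a"))  # {'a', 'b', 'c'}
print(g.first_path("c"))                  # ['a', 'c']: earliest-inserted predecessor chain
print(g.to_dot())                         # dot source for the whole graph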
import sys
from typing import Any, Callable, Iterable, List, Tuple
__all__ = ["trace_dependencies"]
def trace_dependencies(
callable: Callable[[Any], Any], inputs: Iterable[Tuple[Any, ...]]
) -> List[str]:
"""Trace the execution of a callable in order to determine which modules it uses.
Args:
callable: The callable to execute and trace.
        inputs: The inputs to use during tracing. The modules used by 'callable' when invoked by each set of inputs
            are union-ed to determine all modules used by the callable for the purposes of packaging.
Returns: A list of the names of all modules used during callable execution.
"""
modules_used = set()
def record_used_modules(frame, event, arg):
# If the event being profiled is not a Python function
# call, there is nothing to do.
if event != "call":
return
# This is the name of the function that was called.
name = frame.f_code.co_name
module = None
# Try to determine the name of the module that the function
# is in:
# 1) Check the global namespace of the frame.
# 2) Check the local namespace of the frame.
# 3) To handle class instance method calls, check
# the attribute named 'name' of the object
# in the local namespace corresponding to "self".
if name in frame.f_globals:
module = frame.f_globals[name].__module__
elif name in frame.f_locals:
module = frame.f_locals[name].__module__
elif "self" in frame.f_locals:
method = getattr(frame.f_locals["self"], name, None)
module = method.__module__ if method else None
# If a module was found, add it to the set of used modules.
if module:
modules_used.add(module)
try:
# Attach record_used_modules as the profiler function.
sys.setprofile(record_used_modules)
# Execute the callable with all inputs.
for inp in inputs:
callable(*inp)
finally:
# Detach the profiler function.
sys.setprofile(None)
return list(modules_used)
| pytorch-master | torch/package/analyze/trace_dependencies.py |
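A small sketch of the tracer above: the callable and inputs here are made up, but the call pattern mirrors the signature (a callable plus an iterable of argument tuples).

import torch
from torch.package.analyze import trace_dependencies

def run_model(x):
    return torch.nn.functional.relu(x)

used = trace_dependencies(run_model, [(torch.randn(3),)])
# 'used' lists module names observed during the call, e.g. "torch.nn.functional".
print(used)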
from typing import Dict, List
from ..package_exporter import PackagingError
__all__ = ["find_first_use_of_broken_modules"]
def find_first_use_of_broken_modules(exc: PackagingError) -> Dict[str, List[str]]:
"""
Find all broken modules in a PackagingError, and for each one, return the
dependency path in which the module was first encountered.
    E.g. if broken module m.n.o was added to the dependency graph while processing a.b.c,
    then re-encountered while processing d.e.f, this method would return
    {'m.n.o': ['a', 'b', 'c']}
Args:
exc: a PackagingError
Returns: A dict from broken module names to lists of module names in the path.
"""
assert isinstance(exc, PackagingError), "exception must be a PackagingError"
uses = {}
broken_module_names = [
m for m, attr in exc.dependency_graph.nodes.items() if attr.get("error", False)
]
for module_name in broken_module_names:
path = exc.dependency_graph.first_path(module_name)
uses[module_name] = path
return uses
| pytorch-master | torch/package/analyze/find_first_use_of_broken_modules.py |
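A hedged sketch of the intended call pattern: packaging is attempted, and if it fails the PackagingError is fed to the helper above. The output path and pickled object are hypothetical, and the except branch only runs when some dependency genuinely cannot be packaged.

from torch.package import PackageExporter, PackagingError
from torch.package.analyze import find_first_use_of_broken_modules

payload = {"weights": [1.0, 2.0, 3.0]}            # hypothetical object to package
try:
    with PackageExporter("out.pt") as exporter:   # hypothetical output file
        exporter.intern("**")
        exporter.save_pickle("model", "model.pkl", payload)
except PackagingError as exc:
    for broken, path in find_first_use_of_broken_modules(exc).items():
        print(broken, "first reached via", path)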
from .find_first_use_of_broken_modules import find_first_use_of_broken_modules
from .trace_dependencies import trace_dependencies
| pytorch-master | torch/package/analyze/__init__.py |
from types import ModuleType
from typing import Any
from .._mangling import is_mangled
def is_from_package(obj: Any) -> bool:
"""
Return whether an object was loaded from a package.
Note: packaged objects from externed modules will return ``False``.
"""
if type(obj) == ModuleType:
return is_mangled(obj.__name__)
else:
return is_mangled(type(obj).__module__)
| pytorch-master | torch/package/analyze/is_from_package.py |
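A brief sketch of the check above. The first call is directly runnable; the PackageImporter part assumes a previously created package file, so "my_package.pt" and "model.pkl" are hypothetical names.

import torch
from torch.package import PackageImporter
from torch.package.analyze.is_from_package import is_from_package

print(is_from_package(torch))        # False: a regular, un-mangled module

importer = PackageImporter("my_package.pt")       # hypothetical package file
obj = importer.load_pickle("model", "model.pkl")  # hypothetical resource
print(is_from_package(obj))          # True when obj's type came from the package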
#!/usr/bin/env python3
## @package process
# Module doxygen.process
# Script to insert preamble for doxygen and regen API docs
import os
import shutil
# Module caffe2...caffe2.python.control_test
def insert(originalfile, first_line, description):
with open(originalfile, 'r') as f:
f1 = f.readline()
if(f1.find(first_line) < 0):
docs = first_line + description + f1
with open('newfile.txt', 'w') as f2:
f2.write(docs)
f2.write(f.read())
os.rename('newfile.txt', originalfile)
else:
print('already inserted')
# move up from /caffe2_root/doxygen
os.chdir("..")
os.system("git checkout caffe2/contrib/.")
os.system("git checkout caffe2/distributed/.")
os.system("git checkout caffe2/experiments/.")
os.system("git checkout caffe2/python/.")
for root, dirs, files in os.walk("."):
for file in files:
if (file.endswith(".py") and not file.endswith("_test.py") and not file.endswith("__.py")):
filepath = os.path.join(root, file)
print(("filepath: " + filepath))
directory = os.path.dirname(filepath)[2:]
directory = directory.replace("/", ".")
print("directory: " + directory)
name = os.path.splitext(file)[0]
first_line = "## @package " + name
description = "\n# Module " + directory + "." + name + "\n"
print(first_line, description)
insert(filepath, first_line, description)
if os.path.exists("doxygen/doxygen-python"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("doxygen/doxygen-python")
else:
os.makedirs("doxygen/doxygen-python")
if os.path.exists("doxygen/doxygen-c"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("doxygen/doxygen-c")
else:
os.makedirs("doxygen/doxygen-c")
os.system("doxygen .Doxyfile-python")
os.system("doxygen .Doxyfile-c")
| pytorch-master | docs/caffe2/process.py |
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
from os import path
import re
# import sys
import pkgutil
# source code directory, relative to this file, for sphinx-autobuild
# sys.path.insert(0, os.path.abspath('../..'))
import torch
try:
import torchvision # noqa: F401
except ImportError:
import warnings
warnings.warn('unable to load "torchvision" package')
RELEASE = os.environ.get('RELEASE', False)
import pytorch_sphinx_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '3.1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinxcontrib.katex',
'sphinx.ext.autosectionlabel',
'sphinx_copybutton',
'sphinx_panels'
]
# build the templated autosummary files
autosummary_generate = True
numpydoc_show_class_members = False
# Theme has bootstrap already
panels_add_bootstrap_css = False
# autosectionlabel throws warnings if section names are duplicated.
# The following tells autosectionlabel to not throw a warning for
# duplicated section names that are in different documents.
autosectionlabel_prefix_document = True
# katex options
#
#
katex_prerender = True
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# TODO: document these and remove them from here.
coverage_ignore_functions = [
# torch
"typename",
# torch.autograd
"register_py_tensor_class_for_device",
"variable",
# torch.cuda
"check_error",
"cudart",
"is_bf16_supported",
# torch.distributed.autograd
"is_available",
# torch.distributed.elastic.events
"construct_and_record_rdzv_event",
"record_rdzv_event",
# torch.distributed.elastic.metrics
"initialize_metrics",
# torch.distributed.elastic.rendezvous.registry
"get_rendezvous_handler",
# torch.distributed.launch
"launch",
"main",
"parse_args",
# torch.distributed.rpc
"is_available",
# torch.distributed.run
"config_from_args",
"determine_local_world_size",
"get_args_parser",
"get_rdzv_endpoint",
"get_use_env",
"main",
"parse_args",
"parse_min_max_nnodes",
"run",
"run_script_path",
# torch.distributions.constraints
"is_dependent",
# torch.hub
"import_module",
# torch.jit
"export_opnames",
# torch.jit.unsupported_tensor_ops
"execWrapper",
# torch.onnx
"unregister_custom_op_symbolic",
# torch.ao.quantization
"default_eval_fn",
# torch.backends
"disable_global_flags",
"flags_frozen",
# torch.distributed.algorithms.ddp_comm_hooks
"register_ddp_comm_hook",
# torch.nn
"factory_kwargs",
# torch.nn.parallel
"DistributedDataParallelCPU",
# torch.utils
"set_module",
# torch.utils.model_dump
"burn_in_info",
"get_info_and_burn_skeleton",
"get_inline_skeleton",
"get_model_info",
"get_storage_info",
"hierarchical_pickle",
]
coverage_ignore_classes = [
# torch
"FatalError",
"QUInt2x4Storage",
"Size",
"Storage",
"Stream",
"Tensor",
"finfo",
"iinfo",
"qscheme",
"AggregationType",
"AliasDb",
"AnyType",
"Argument",
"ArgumentSpec",
"BenchmarkConfig",
"BenchmarkExecutionStats",
"Block",
"BoolType",
"BufferDict",
"CallStack",
"Capsule",
"ClassType",
"Code",
"CompleteArgumentSpec",
"ComplexType",
"ConcreteModuleType",
"ConcreteModuleTypeBuilder",
"DeepCopyMemoTable",
"DeserializationStorageContext",
"DeviceObjType",
"DictType",
"EnumType",
"ExecutionPlan",
"FileCheck",
"FloatType",
"FunctionSchema",
"Gradient",
"Graph",
"GraphExecutorState",
"IODescriptor",
"InferredType",
"IntType",
"InterfaceType",
"ListType",
"LockingLogger",
"MobileOptimizerType",
"ModuleDict",
"Node",
"NoneType",
"NoopLogger",
"NumberType",
"OperatorInfo",
"OptionalType",
"ParameterDict",
"PyObjectType",
"PyTorchFileReader",
"PyTorchFileWriter",
"RRefType",
"ScriptClass",
"ScriptClassFunction",
"ScriptDict",
"ScriptDictIterator",
"ScriptDictKeyIterator",
"ScriptList",
"ScriptListIterator",
"ScriptMethod",
"ScriptModule",
"ScriptModuleSerializer",
"ScriptObject",
"ScriptObjectProperty",
"SerializationStorageContext",
"StaticModule",
"StringType",
"SymIntType",
"ThroughputBenchmark",
"TracingState",
"TupleType",
"Type",
"UnionType",
"Use",
"Value",
# torch.cuda
"BFloat16Storage",
"BFloat16Tensor",
"BoolStorage",
"BoolTensor",
"ByteStorage",
"ByteTensor",
"CharStorage",
"CharTensor",
"ComplexDoubleStorage",
"ComplexFloatStorage",
"CudaError",
"DeferredCudaCallError",
"DoubleStorage",
"DoubleTensor",
"FloatStorage",
"FloatTensor",
"HalfStorage",
"HalfTensor",
"IntStorage",
"IntTensor",
"LongStorage",
"LongTensor",
"ShortStorage",
"ShortTensor",
"cudaStatus",
# torch.distributed.elastic.multiprocessing.errors
"ChildFailedError",
"ProcessFailure",
# torch.distributions.constraints
"cat",
"greater_than",
"greater_than_eq",
"half_open_interval",
"independent",
"integer_interval",
"interval",
"less_than",
"multinomial",
"stack",
# torch.distributions.transforms
"AffineTransform",
"CatTransform",
"ComposeTransform",
"CorrCholeskyTransform",
"CumulativeDistributionTransform",
"ExpTransform",
"IndependentTransform",
"PowerTransform",
"ReshapeTransform",
"SigmoidTransform",
"SoftmaxTransform",
"SoftplusTransform",
"StackTransform",
"StickBreakingTransform",
"TanhTransform",
"Transform",
# torch.jit
"CompilationUnit",
"Error",
"Future",
"ScriptFunction",
# torch.onnx
"CheckerError",
"ExportTypes",
# torch.backends
"ContextProp",
"PropModule",
# torch.backends.cuda
"cuBLASModule",
"cuFFTPlanCache",
"cuFFTPlanCacheAttrContextProp",
"cuFFTPlanCacheManager",
# torch.distributed.algorithms.ddp_comm_hooks
"DDPCommHookType",
# torch.jit.mobile
"LiteScriptModule",
# torch.nn.quantized.modules
"DeQuantize",
"Quantize",
# torch.utils.backcompat
"Warning",
"SymIntNode"
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PyTorch'
copyright = '2022, PyTorch Contributors'
author = 'PyTorch Contributors'
torch_version = str(torch.__version__)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
version = 'master (' + torch_version + ' )'
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = 'master'
# Customized html_title here.
# Default is " ".join(project, release, "documentation") if not set
if RELEASE:
# Turn 1.11.0aHASH into 1.11
# Note: the release candidates should no longer have the aHASH suffix, but in any
# case we wish to leave only major.minor, even for rc builds.
version = '.'.join(torch_version.split('.')[:2])
html_title = " ".join((project, version, "documentation"))
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Disable docstring inheritance
autodoc_inherit_docstrings = False
# Disable displaying type annotations, these can be very verbose
autodoc_typehints = 'none'
# Enable overriding of function signatures in the first line of the docstring.
autodoc_docstring_signature = True
# -- katex javascript in header
#
# def setup(app):
# app.add_javascript("https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.js")
# -- Options for HTML output ----------------------------------------------
#
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'pytorch_project': 'docs',
'canonical_url': 'https://pytorch.org/docs/stable/',
'collapse_navigation': False,
'display_version': True,
'logo_only': True,
'analytics_id': 'UA-117752657-2',
}
html_logo = '_static/img/pytorch-logo-dark-unstable.png'
if RELEASE:
html_logo = '_static/img/pytorch-logo-dark.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/jit.css',
]
from sphinx.ext.coverage import CoverageBuilder
def coverage_post_process(app, exception):
if exception is not None:
return
# Only run this test for the coverage build
if not isinstance(app.builder, CoverageBuilder):
return
if not torch.distributed.is_available():
raise RuntimeError("The coverage tool cannot run with a version "
"of PyTorch that was built with USE_DISTRIBUTED=0 "
"as this module's API changes.")
# These are all the modules that have "automodule" in an rst file
# These modules are the ones for which coverage is checked
# Here, we make sure that no module is missing from that list
modules = app.env.domaindata['py']['modules']
# We go through all the torch submodules and make sure they are
# properly tested
missing = set()
def is_not_internal(modname):
split_name = modname.split(".")
for name in split_name:
if name[0] == "_":
return False
return True
# The walk function does not return the top module
if "torch" not in modules:
missing.add("torch")
for _, modname, ispkg in pkgutil.walk_packages(path=torch.__path__,
prefix=torch.__name__ + '.'):
if ispkg and is_not_internal(modname):
if modname not in modules:
missing.add(modname)
output = []
if missing:
mods = ", ".join(missing)
output.append(f"\nYou added the following module(s) to the PyTorch namespace '{mods}' "
"but they have no corresponding entry in a doc .rst file. You should "
"either make sure that the .rst file that contains the module's documentation "
"properly contains either '.. automodule:: mod_name' (if you do not want "
"the paragraph added by the automodule, you can simply use '.. py:module:: mod_name') "
" or make the module private (by appending an '_' at the beginning of its name).")
# The output file is hard-coded by the coverage tool
# Our CI is setup to fail if any line is added to this file
output_file = path.join(app.outdir, 'python.txt')
if output:
with open(output_file, "a") as f:
for o in output:
f.write(o)
def process_docstring(app, what_, name, obj, options, lines):
"""
    Custom process to transform docstring lines: remove "Ignore" blocks.
Args:
app (sphinx.application.Sphinx): the Sphinx application object
what (str):
the type of the object which the docstring belongs to (one of
"module", "class", "exception", "function", "method", "attribute")
name (str): the fully qualified name of the object
obj: the object itself
options: the options given to the directive: an object with
attributes inherited_members, undoc_members, show_inheritance
and noindex that are true if the flag option of same name was
given to the auto directive
lines (List[str]): the lines of the docstring, see above
References:
https://www.sphinx-doc.org/en/1.5.1/_modules/sphinx/ext/autodoc.html
https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
"""
import re
remove_directives = [
# Remove all xdoctest directives
re.compile(r'\s*>>>\s*#\s*x?doctest:\s*.*'),
re.compile(r'\s*>>>\s*#\s*x?doc:\s*.*'),
]
filtered_lines = [
line for line in lines
if not any(pat.match(line) for pat in remove_directives)
]
# Modify the lines inplace
lines[:] = filtered_lines
# make sure there is a blank line at the end
if lines and lines[-1].strip():
lines.append('')
# Called automatically by Sphinx, making this `conf.py` an "extension".
def setup(app):
# NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value
# and can be moved outside of this function (and the setup(app) function
# can be deleted).
html_css_files = [
'https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css'
]
# In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is
# `add_stylesheet` (deprecated in 1.8).
add_css = getattr(app, 'add_css_file', app.add_stylesheet)
for css_file in html_css_files:
add_css(css_file)
app.connect("build-finished", coverage_post_process)
app.connect('autodoc-process-docstring', process_docstring)
# From PyTorch 1.5, we now use autogenerated files to document classes and
# functions. This breaks older references since
# https://pytorch.org/docs/stable/torch.html#torch.flip
# moved to
# https://pytorch.org/docs/stable/generated/torch.flip.html
# which breaks older links from blog posts, stack overflow answers and more.
# To mitigate that, we add an id="torch.flip" in an appropriated place
# in torch.html by overriding the visit_reference method of html writers.
# Someday this can be removed, once the old links fade away
from sphinx.writers import html, html5
def replace(Klass):
old_call = Klass.visit_reference
def visit_reference(self, node):
if 'refuri' in node and 'generated' in node.get('refuri'):
ref = node.get('refuri')
ref_anchor = ref.split('#')
if len(ref_anchor) > 1:
# Only add the id if the node href and the text match,
# i.e. the href is "torch.flip#torch.flip" and the content is
# "torch.flip" or "flip" since that is a signal the node refers
# to autogenerated content
anchor = ref_anchor[1]
txt = node.parent.astext()
if txt == anchor or txt == anchor.split('.')[-1]:
self.body.append('<p id="{}"/>'.format(ref_anchor[1]))
return old_call(self, node)
Klass.visit_reference = visit_reference
replace(html.HTMLTranslator)
replace(html5.HTML5Translator)
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTorchdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pytorch.tex', 'PyTorch Documentation',
'Torch Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'PyTorch', 'PyTorch Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyTorch', 'PyTorch Documentation',
author, 'PyTorch', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
import sphinx.ext.doctest
# Without this, doctest adds any example with a `>>>` as a test
doctest_test_doctest_blocks = ''
doctest_default_flags = sphinx.ext.doctest.doctest.ELLIPSIS
doctest_global_setup = '''
import torch
try:
import torchvision
except ImportError:
torchvision = None
'''
def patched_make_field(self, types, domain, items, **kw):
# `kw` catches `env=None` needed for newer sphinx while maintaining
# backwards compatibility when passed along further down!
# type: (List, unicode, Tuple) -> nodes.field
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
# par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# addnodes.literal_strong))
if fieldarg in types:
par += nodes.Text(' (')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = fieldtype[0].astext()
builtin_types = ['int', 'long', 'float', 'bool', 'type']
for builtin_type in builtin_types:
pattern = fr'(?<![\w.]){builtin_type}(?![\w.])'
repl = f'python:{builtin_type}'
typename = re.sub(pattern, repl, typename)
par.extend(self.make_xrefs(self.typerolename, domain, typename,
addnodes.literal_emphasis, **kw))
else:
par += fieldtype
par += nodes.Text(')')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
TypedField.make_field = patched_make_field
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
| pytorch-master | docs/source/conf.py |
"""
This script will generate input-out plots for all of the activation
functions. These are for use in the documentation, and potentially in
online tutorials.
"""
from pathlib import Path
import torch
import matplotlib
from matplotlib import pyplot as plt
matplotlib.use("Agg")
# Create a directory for the images, if it doesn't exist
ACTIVATION_IMAGE_PATH = Path(__file__).parent / "activation_images"
if not ACTIVATION_IMAGE_PATH.exists():
ACTIVATION_IMAGE_PATH.mkdir()
# In a refactor, these ought to go into their own module or entry
# points so we can generate this list programmatically
functions = [
torch.nn.ELU(),
torch.nn.Hardshrink(),
torch.nn.Hardtanh(),
torch.nn.Hardsigmoid(),
torch.nn.Hardswish(),
torch.nn.LeakyReLU(negative_slope=0.1),
torch.nn.LogSigmoid(),
torch.nn.PReLU(),
torch.nn.ReLU(),
torch.nn.ReLU6(),
torch.nn.RReLU(),
torch.nn.SELU(),
torch.nn.SiLU(),
torch.nn.Mish(),
torch.nn.CELU(),
torch.nn.GELU(),
torch.nn.Sigmoid(),
torch.nn.Softplus(),
torch.nn.Softshrink(),
torch.nn.Softsign(),
torch.nn.Tanh(),
torch.nn.Tanhshrink(),
]
def plot_function(function, **args):
"""
Plot a function on the current plot. The additional arguments may
be used to specify color, alpha, etc.
"""
xrange = torch.arange(-7.0, 7.0, 0.01) # We need to go beyond 6 for ReLU6
plt.plot(xrange.numpy(), function(xrange).detach().numpy(), **args)
# Step through all the functions
for function in functions:
function_name = function._get_name()
plot_path = ACTIVATION_IMAGE_PATH / f"{function_name}.png"
if not plot_path.exists():
# Start a new plot
plt.clf()
plt.grid(color="k", alpha=0.2, linestyle="--")
# Plot the current function
plot_function(function)
plt.title(function)
plt.xlabel("Input")
plt.ylabel("Output")
plt.xlim([-7, 7])
plt.ylim([-7, 7])
# And save it
plt.savefig(plot_path)
print(f"Saved activation image for {function_name} at {plot_path}")
| pytorch-master | docs/source/scripts/build_activation_images.py |
"""
This script will generate default values of quantization configs.
These are for use in the documentation.
"""
import torch
from torch.ao.quantization.backend_config import get_native_backend_config_dict
from torch.ao.quantization.backend_config.utils import (
entry_to_pretty_str,
remove_boolean_dispatch_from_name,
)
import os.path
# Create a directory for the images, if it doesn't exist
QUANTIZATION_BACKEND_CONFIG_IMAGE_PATH = os.path.join(
os.path.realpath(os.path.join(__file__, "..")),
"quantization_backend_configs"
)
if not os.path.exists(QUANTIZATION_BACKEND_CONFIG_IMAGE_PATH):
os.mkdir(QUANTIZATION_BACKEND_CONFIG_IMAGE_PATH)
output_path = os.path.join(QUANTIZATION_BACKEND_CONFIG_IMAGE_PATH, "default_backend_config.txt")
with open(output_path, "w") as f:
native_backend_config_dict = get_native_backend_config_dict()
configs = native_backend_config_dict['configs']
def _sort_key_func(entry):
pattern = entry['pattern']
while isinstance(pattern, tuple):
pattern = pattern[-1]
pattern = remove_boolean_dispatch_from_name(pattern)
if not isinstance(pattern, str):
# methods are already strings
pattern = torch.typename(pattern)
# we want
#
# torch.nn.modules.pooling.AdaptiveAvgPool1d
#
# and
#
# torch._VariableFunctionsClass.adaptive_avg_pool1d
#
# to be next to each other, so convert to all lower case
# and remove the underscores, and compare the last part
# of the string
pattern_str_normalized = pattern.lower().replace('_', '')
key = pattern_str_normalized.split('.')[-1]
return key
configs.sort(key=_sort_key_func)
entries = []
for entry in configs:
entries.append(entry_to_pretty_str(entry))
entries = ",\n".join(entries)
f.write(entries)
| pytorch-master | docs/source/scripts/build_quantization_configs.py |
"""
This script generates a CSV table with all ATen operators
supported by `torch.onnx.export`. The generated table is included by
docs/source/onnx_supported_aten_list.rst.
"""
import os
from torch.onnx import _onnx_supported_ops
# Constants
BUILD_DIR = "build"
AUTO_GEN_ATEN_OPS_CSV_FILE = "auto_gen_aten_op_list.csv"
def main():
os.makedirs(BUILD_DIR, exist_ok=True)
aten_list = _onnx_supported_ops.onnx_supported_ops()
with open(os.path.join(BUILD_DIR, AUTO_GEN_ATEN_OPS_CSV_FILE), "w") as f:
f.write("Operator,opset_version(s)\n")
for name, opset_version in aten_list:
f.write(f'"``{name}``","{opset_version}"\n')
if __name__ == "__main__":
main()
| pytorch-master | docs/source/scripts/onnx/build_onnx_supported_aten_op_csv_table.py |
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# NB: C++ API doc generation using doxygen / breathe / exhale is currently only
# enabled on nightlies (and not trunk or on PRs) due to OOM errors in CI.
# See https://github.com/pytorch/pytorch/issues/79992.
import os
# sys.path.insert(0, os.path.abspath('.'))
import textwrap
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '3.1.2'
run_doxygen = os.environ.get('RUN_DOXYGEN', "false") == "true"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
] + ([
'breathe',
'exhale'
] if run_doxygen else [])
intersphinx_mapping = {
'pytorch': ('https://pytorch.org/docs/master', None)
}
# Setup absolute paths for communicating with breathe / exhale where
# items are expected / should be trimmed by.
# This file is {repo_root}/docs/cpp/source/conf.py
this_file_dir = os.path.abspath(os.path.dirname(__file__))
doxygen_xml_dir = os.path.join(
os.path.dirname(this_file_dir), # {repo_root}/docs/cpp
'build', # {repo_root}/docs/cpp/build
'xml' # {repo_root}/docs/cpp/build/xml
)
repo_root = os.path.dirname( # {repo_root}
os.path.dirname( # {repo_root}/docs
os.path.dirname( # {repo_root}/docs/cpp
this_file_dir # {repo_root}/docs/cpp/source
)
)
)
breathe_projects = {"PyTorch": doxygen_xml_dir}
breathe_default_project = "PyTorch"
# Setup the exhale extension
exhale_args = {
############################################################################
# These arguments are required. #
############################################################################
"containmentFolder": "./api",
"rootFileName": "library_root.rst",
"rootFileTitle": "Library API",
"doxygenStripFromPath": repo_root,
############################################################################
# Suggested optional arguments. #
############################################################################
"createTreeView": True,
"exhaleExecutesDoxygen": True,
"exhaleUseDoxyfile": True,
"verboseBuild": True,
############################################################################
# HTML Theme specific configurations. #
############################################################################
# Fix broken Sphinx RTD Theme 'Edit on GitHub' links
# Search for 'Edit on GitHub' on the FAQ:
# http://exhale.readthedocs.io/en/latest/faq.html
"pageLevelConfigMeta": ":github_url: https://github.com/pytorch/pytorch",
############################################################################
# Individual page layout example configuration. #
############################################################################
# Example of adding contents directives on custom kinds with custom title
"contentsTitle": "Page Contents",
"kindsWithContentsDirectives": ["class", "file", "namespace", "struct"],
# Exclude PIMPL files from class hierarchy tree and namespace pages.
"listingExclude": [r".*Impl$"],
############################################################################
# Main library page layout example configuration. #
############################################################################
"afterTitleDescription": textwrap.dedent(u'''
Welcome to the developer reference for the PyTorch C++ API.
'''),
}
# Tell sphinx what the primary language being documented is.
primary_domain = 'cpp'
# Tell sphinx what the pygments highlight language should be.
highlight_language = 'cpp'
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PyTorch'
copyright = '2022, PyTorch Contributors'
author = 'PyTorch Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
version = 'master'
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = 'master'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'canonical_url': 'https://pytorch.org/docs/stable/',
'pytorch_project': 'docs',
'collapse_navigation': False,
'display_version': True,
'logo_only': True,
}
# NOTE: sharing python docs resources
html_logo = os.path.join(
repo_root, 'docs', 'source', '_static', 'img', 'pytorch-logo-dark-unstable.png'
)
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# NOTE: sharing python docs resources
html_static_path = [os.path.join(repo_root, 'docs', 'cpp', 'source', '_static')]
# Called automatically by Sphinx, making this `conf.py` an "extension".
def setup(app):
# NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value
# and can be moved outside of this function (and the setup(app) function
# can be deleted).
html_css_files = ['cpp_theme.css']
# In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is
# `add_stylesheet` (deprecated in 1.8).
add_css = getattr(app, 'add_css_file', app.add_stylesheet)
for css_file in html_css_files:
add_css(css_file)
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
# htmlhelp_basename = 'PyTorchdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pytorch.tex', 'PyTorch Documentation',
'Torch Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'PyTorch', 'PyTorch Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyTorch', 'PyTorch Documentation',
author, 'PyTorch', 'One line description of project.',
'Miscellaneous'),
]
| pytorch-master | docs/cpp/source/conf.py |
import torch
import torchvision
from torch.backends._coreml.preprocess import (
CompileSpec,
TensorSpec,
CoreMLComputeUnit,
)
def mobilenetv2_spec():
return {
"forward": CompileSpec(
inputs=(
TensorSpec(
shape=[1, 3, 224, 224],
),
),
outputs=(
TensorSpec(
shape=[1, 1000],
),
),
backend=CoreMLComputeUnit.CPU,
allow_low_precision=True,
),
}
def main():
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
model = torch.jit.trace(model, example)
compile_spec = mobilenetv2_spec()
mlmodel = torch._C._jit_to_backend("coreml", model, compile_spec)
print(mlmodel._c._get_method("forward").graph)
mlmodel._save_for_lite_interpreter("../models/model_coreml.ptl")
if __name__ == "__main__":
main()
| pytorch-master | ios/TestApp/benchmark/coreml_backend.py |
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
optimized_scripted_module = optimize_for_mobile(traced_script_module)
torch.jit.save(optimized_scripted_module, '../models/model.pt')
exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter("../models/model_lite.ptl")
| pytorch-master | ios/TestApp/benchmark/trace_model.py |
import torch
import torchvision
import yaml
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
ops = torch.jit.export_opnames(traced_script_module)
with open('mobilenetv2.yaml', 'w') as output:
yaml.dump(ops, output)
| pytorch-master | ios/TestApp/custom_build/custom_build.py |
#!/usr/bin/env python3
from __future__ import print_function
import os
CPUINFO_SOURCES = {
None: [
"init.c",
"api.c",
"cache.c",
],
"defined(__linux__)": [
"linux/multiline.c",
"linux/cpulist.c",
"linux/mockfile.c",
"linux/smallfile.c",
"linux/processors.c",
],
"defined(__MACH__) && defined(__APPLE__)": [
"mach/topology.c",
],
"defined(__i386__) || defined(__i686__) || defined(__x86_64__) || defined(_WIN32)": [
"x86/cache/init.c",
"x86/cache/deterministic.c",
"x86/cache/descriptor.c",
"x86/info.c",
"x86/mockcpuid.c",
"x86/isa.c",
"x86/topology.c",
"x86/name.c",
"x86/init.c",
"x86/uarch.c",
"x86/vendor.c",
],
"(defined(__i386__) || defined(__i686__) || defined(__x86_64__)) && defined(__linux__)": [
"x86/linux/init.c",
"x86/linux/cpuinfo.c",
],
"(defined(__i386__) || defined(__i686__) || defined(__x86_64__)) && defined(__MACH__) && defined(__APPLE__)": [
"x86/mach/init.c",
],
"defined(_WIN32)": [
"x86/windows/init.c",
],
"(defined(__arm__) || defined(__aarch64__)) && defined(__linux__)": [
"arm/linux/cpuinfo.c",
"arm/linux/hwcap.c",
"arm/linux/init.c",
"arm/linux/clusters.c",
"arm/linux/midr.c",
"arm/linux/chipset.c",
"arm/tlb.c",
"arm/uarch.c",
"arm/cache.c",
],
"defined(__arm__) && defined(__linux__)": [
"arm/linux/aarch32-isa.c",
],
"defined(__aarch64__) && defined(__linux__)": [
"arm/linux/aarch64-isa.c",
],
"(defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)": [
"arm/android/properties.c",
],
"(defined(__arm__) || defined(__aarch64__)) && defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE": [
"arm/mach/init.c",
],
}
if __name__ == "__main__":
for condition, filenames in CPUINFO_SOURCES.items():
for filename in filenames:
filepath = os.path.join("cpuinfo/wrappers", filename)
if not os.path.exists(os.path.dirname(filepath)):
print(filepath)
os.makedirs(os.path.dirname(filepath))
with open(filepath, "w") as wrapper:
print("/* Auto-generated by generate-wrappers.py script. Do not modify */", file=wrapper)
print(file=wrapper)
print("#ifdef __APPLE__", file=wrapper)
print("\t#include <TargetConditionals.h>", file=wrapper)
print("#endif /* __APPLE__ */", file=wrapper)
print(file=wrapper)
if not condition:
print("#include <%s>" % filename, file=wrapper)
else:
# Include source file only if condition is satisfied
print("#if %s" % condition, file=wrapper)
print("#include <%s>" % filename, file=wrapper)
print("#endif /* %s */" % condition, file=wrapper)
| pytorch-master | third_party/generate-cpuinfo-wrappers.py |
#!/usr/bin/env python3
import argparse
import os
mydir = os.path.dirname(__file__)
licenses = {'LICENSE', 'LICENSE.txt', 'LICENSE.rst', 'COPYING.BSD'}
def collect_license(current):
collected = {}
for root, dirs, files in os.walk(current):
license = list(licenses & set(files))
if license:
name = root.split('/')[-1]
license_file = os.path.join(root, license[0])
try:
ident = identify_license(license_file)
except ValueError:
raise ValueError('could not identify license file '
f'for {root}') from None
val = {
'Name': name,
'Files': [root],
'License': ident,
'License_file': [license_file],
}
if name in collected:
# Only add it if the license is different
if collected[name]['License'] == ident:
collected[name]['Files'].append(root)
collected[name]['License_file'].append(license_file)
else:
collected[name + f' ({root})'] = val
else:
collected[name] = val
return collected
def create_bundled(d, outstream, include_files=False):
"""Write the information to an open outstream"""
collected = collect_license(d)
sorted_keys = sorted(collected.keys())
outstream.write('The Pytorch repository and source distributions bundle '
'several libraries that are \n')
outstream.write('compatibly licensed. We list these here.')
files_to_include = []
for k in sorted_keys:
c = collected[k]
files = ',\n '.join(c['Files'])
license_file = ',\n '.join(c['License_file'])
outstream.write('\n\n')
outstream.write(f"Name: {c['Name']}\n")
outstream.write(f"License: {c['License']}\n")
outstream.write(f"Files: {files}\n")
outstream.write(' For details, see')
if include_files:
outstream.write(' the files concatenated below: ')
files_to_include += c['License_file']
else:
outstream.write(': ')
outstream.write(license_file)
for fname in files_to_include:
outstream.write('\n\n')
outstream.write(fname)
outstream.write('\n' + '-' * len(fname) + '\n')
with open(fname, 'r') as fid:
outstream.write(fid.read())
def identify_license(f, exception=''):
"""
Read f and try to identify the license type
This is __very__ rough and probably not legally binding, it is specific for
this repo.
"""
def squeeze(t):
"""Remove 'n and ' ', normalize quotes
"""
t = t.replace('\n', '').replace(' ', '')
t = t.replace('``', '"').replace("''", '"')
return t
with open(f) as fid:
txt = fid.read()
if not exception and 'exception' in txt:
license = identify_license(f, 'exception')
return license + ' with exception'
txt = squeeze(txt)
if 'ApacheLicense' in txt:
# Hmm, do we need to check the text?
return 'Apache-2.0'
elif 'MITLicense' in txt:
# Hmm, do we need to check the text?
return 'MIT'
elif 'BSD-3-ClauseLicense' in txt:
# Hmm, do we need to check the text?
return 'BSD-3-Clause'
elif 'BSD3-ClauseLicense' in txt:
# Hmm, do we need to check the text?
return 'BSD-3-Clause'
elif 'BoostSoftwareLicense-Version1.0' in txt:
# Hmm, do we need to check the text?
return 'BSL-1.0'
elif squeeze("Clarified Artistic License") in txt:
return 'Clarified Artistic License'
elif all([squeeze(m) in txt.lower() for m in bsd3_txt]):
return 'BSD-3-Clause'
elif all([squeeze(m) in txt.lower() for m in bsd3_v1_txt]):
return 'BSD-3-Clause'
elif all([squeeze(m) in txt.lower() for m in bsd2_txt]):
return 'BSD-2-Clause'
elif all([squeeze(m) in txt.lower() for m in bsd3_src_txt]):
return 'BSD-Source-Code'
elif any([squeeze(m) in txt.lower() for m in mit_txt]):
return 'MIT'
else:
raise ValueError('unknown license')
mit_txt = ['permission is hereby granted, free of charge, to any person ',
'obtaining a copy of this software and associated documentation ',
'files (the "software"), to deal in the software without ',
'restriction, including without limitation the rights to use, copy, ',
'modify, merge, publish, distribute, sublicense, and/or sell copies ',
'of the software, and to permit persons to whom the software is ',
'furnished to do so, subject to the following conditions:',
'the above copyright notice and this permission notice shall be ',
'included in all copies or substantial portions of the software.',
'the software is provided "as is", without warranty of any kind, ',
'express or implied, including but not limited to the warranties of ',
'merchantability, fitness for a particular purpose and ',
'noninfringement. in no event shall the authors or copyright holders ',
'be liable for any claim, damages or other liability, whether in an ',
'action of contract, tort or otherwise, arising from, out of or in ',
'connection with the software or the use or other dealings in the ',
'software.',
]
bsd3_txt = ['redistribution and use in source and binary forms, with or without '
'modification, are permitted provided that the following conditions '
'are met:',
'redistributions of source code',
'redistributions in binary form',
'neither the name',
'this software is provided by the copyright holders and '
'contributors "as is" and any express or implied warranties, '
'including, but not limited to, the implied warranties of '
'merchantability and fitness for a particular purpose are disclaimed.',
]
# BSD2 is BSD3 without the "neither the name..." clause
bsd2_txt = bsd3_txt[:3] + bsd3_txt[4:]
# This BSD3 variant leaves "and contributors" out of the last clause of BSD-3,
# which is still valid BSD-3
v1 = bsd3_txt[4].replace('and contributors', '')
bsd3_v1_txt = bsd3_txt[:3] + [v1]
# This source variant of BSD-3 leaves the "redistributions in binary form" out
# which is https://spdx.org/licenses/BSD-Source-Code.html
bsd3_src_txt = bsd3_txt[:2] + bsd3_txt[4:]
if __name__ == '__main__':
third_party = os.path.relpath(mydir)
parser = argparse.ArgumentParser(
description="Generate bundled licenses file",
)
parser.add_argument(
"--out-file",
type=str,
default=os.environ.get(
"PYTORCH_THIRD_PARTY_BUNDLED_LICENSE_FILE",
str(os.path.join(third_party, 'LICENSES_BUNDLED.txt'))
),
help="location to output new bundled licenses file",
)
args = parser.parse_args()
fname = args.out_file
print(f"+ Writing bundled licenses to {args.out_file}")
with open(fname, 'w') as fid:
create_bundled(third_party, fid)
| pytorch-master | third_party/build_bundled.py |
#!/usr/bin/env python3
from __future__ import print_function
import collections
import os
import sys
BANNER = "Auto-generated by generate-wrappers.py script. Do not modify"
WRAPPER_SRC_NAMES = {
"PROD_SCALAR_PORTABLE_MICROKERNEL_SRCS": None,
"PROD_SCALAR_AARCH32_MICROKERNEL_SRCS" : "defined(__arm__)",
"PROD_NEON_MICROKERNEL_SRCS": "defined(__arm__) || defined(__aarch64__)",
"PROD_NEONFP16_MICROKERNEL_SRCS": "defined(__arm__) || defined(__aarch64__)",
"PROD_NEONFMA_MICROKERNEL_SRCS": "defined(__arm__) || defined(__aarch64__)",
"PROD_AARCH64_NEON_MICROKERNEL_SRCS": "defined(__aarch64__)",
"PROD_NEONV8_MICROKERNEL_SRCS": "defined(__arm__) || defined(__aarch64__)",
"PROD_AARCH64_NEONFP16ARITH_MICROKERNEL_SRCS": "defined(__aarch64__)",
"PROD_NEONDOT_MICROKERNEL_SRCS": "defined(__arm__) || defined(__aarch64__)",
"PROD_SSE_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"PROD_SSE2_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"PROD_SSSE3_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"PROD_SSE41_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"PROD_AVX_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"PROD_F16C_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"PROD_XOP_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"PROD_FMA3_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"PROD_AVX2_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"PROD_AVX512F_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"PROD_AVX512SKX_MICROKERNEL_SRCS": "defined(__i386__) || defined(__i686__) || defined(__x86_64__)",
"AARCH32_ASM_MICROKERNEL_SRCS": "defined(__arm__)",
"AARCH64_ASM_MICROKERNEL_SRCS": "defined(__aarch64__)",
}
SRC_NAMES = [
"OPERATOR_SRCS",
"SUBGRAPH_SRCS",
"LOGGING_SRCS",
"HOT_SRCS",
"TABLE_SRCS",
"JIT_SRCS",
"JIT_AARCH32_SRCS",
"JIT_AARCH64_SRCS",
"PROD_SCALAR_PORTABLE_MICROKERNEL_SRCS",
"PROD_SSE_MICROKERNEL_SRCS",
"PROD_SSE2_MICROKERNEL_SRCS",
"PROD_SSSE3_MICROKERNEL_SRCS",
"PROD_SSE41_MICROKERNEL_SRCS",
"PROD_AVX_MICROKERNEL_SRCS",
"PROD_F16C_MICROKERNEL_SRCS",
"PROD_XOP_MICROKERNEL_SRCS",
"PROD_FMA3_MICROKERNEL_SRCS",
"PROD_AVX2_MICROKERNEL_SRCS",
"PROD_AVX512F_MICROKERNEL_SRCS",
"PROD_AVX512SKX_MICROKERNEL_SRCS",
]
def update_sources(xnnpack_path):
sources = collections.defaultdict(list)
with open(os.path.join(xnnpack_path, "XNNPACK/CMakeLists.txt")) as cmake:
lines = cmake.readlines()
i = 0
while i < len(lines):
line = lines[i]
if line.startswith("SET") and line.split('(')[1].strip(' \t\n\r') in set(WRAPPER_SRC_NAMES.keys()) | set(SRC_NAMES):
name = line.split('(')[1].strip(' \t\n\r')
i += 1
while i < len(lines) and len(lines[i]) > 0 and ')' not in lines[i]:
# remove "src/" at the beginning, remove whitespaces and newline
value = lines[i].strip(' \t\n\r')
sources[name].append(value[4:])
i += 1
if i < len(lines) and len(lines[i]) > 4:
# remove "src/" at the beginning, possibly ')' at the end
value = lines[i].strip(' \t\n\r)')
sources[name].append(value[4:])
else:
i += 1
return sources
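# update_sources returns a dict mapping each recognized CMake variable name
# (e.g. 'OPERATOR_SRCS') to the file paths listed in its SET(...) block, with the
# leading "src/" prefix stripped from every entry.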
def gen_wrappers(xnnpack_path):
xnnpack_sources = collections.defaultdict(list)
sources = update_sources(xnnpack_path)
for name in WRAPPER_SRC_NAMES:
xnnpack_sources[WRAPPER_SRC_NAMES[name]].extend(sources[name])
for condition, filenames in xnnpack_sources.items():
for filename in filenames:
filepath = os.path.join(xnnpack_path, "xnnpack_wrappers", filename)
if not os.path.isdir(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
with open(filepath, "w") as wrapper:
print("/* {} */".format(BANNER), file=wrapper)
print(file=wrapper)
# Architecture- or platform-dependent preprocessor flags can be
# defined here. Note: platform_preprocessor_flags can't be used
# because they are ignored by arc focus & buck project.
if condition is None:
print("#include <%s>" % filename, file=wrapper)
else:
# Include source file only if condition is satisfied
print("#if %s" % condition, file=wrapper)
print("#include <%s>" % filename, file=wrapper)
print("#endif /* %s */" % condition, file=wrapper)
# update xnnpack_wrapper_defs.bzl file under the same folder
with open(os.path.join(os.path.dirname(__file__), "xnnpack_wrapper_defs.bzl"), 'w') as wrapper_defs:
print('"""', file=wrapper_defs)
print(BANNER, file=wrapper_defs)
print('"""', file=wrapper_defs)
for name in WRAPPER_SRC_NAMES:
print('\n' + name + ' = [', file=wrapper_defs)
for file_name in sources[name]:
print(' "xnnpack_wrappers/{}",'.format(file_name), file=wrapper_defs)
print(']', file=wrapper_defs)
# update xnnpack_src_defs.bzl file under the same folder
with open(os.path.join(os.path.dirname(__file__), "xnnpack_src_defs.bzl"), 'w') as src_defs:
print('"""', file=src_defs)
print(BANNER, file=src_defs)
print('"""', file=src_defs)
for name in SRC_NAMES:
print('\n' + name + ' = [', file=src_defs)
for file_name in sources[name]:
print(' "XNNPACK/src/{}",'.format(file_name), file=src_defs)
print(']', file=src_defs)
def main(argv):
if argv is None or len(argv) == 0:
gen_wrappers(".")
else:
gen_wrappers(argv[0])
# The first argument is the place where the "xnnpack_wrappers" folder will be created.
# Running it without arguments will generate "xnnpack_wrappers" in the current path.
# The two .bzl files will always be generated in the current path.
if __name__ == "__main__":
main(sys.argv[1:])
| pytorch-master | third_party/generate-xnnpack-wrappers.py |
#!/usr/bin/env python3
import argparse
import ast
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
from caffe2.python import workspace, brew
def parse_kwarg(kwarg_str):
key, value = kwarg_str.split('=')
try:
value = ast.literal_eval(value)
except ValueError:
pass
return key, value
def main(args):
# User defined keyword arguments
kwargs = {"order": "NCHW", "use_cudnn": False}
kwargs.update(dict(args.kwargs))
model = ModelHelper(name=args.benchmark_name)
op_type = args.operator # assumes a brew type op name
input_name = args.input_name
output_name = args.output_name
iters = int(args.instances)
for i in range(iters):
input_blob_name = input_name + (str(i) if i > 0 and args.chain else '')
output_blob_name = output_name + str(i + 1)
add_op = getattr(brew, op_type)
add_op(model, input_blob_name, output_blob_name, **kwargs)
if args.chain:
input_name, output_name = output_name, input_name
workspace.RunNetOnce(model.param_init_net)
init_net, predict_net = mobile_exporter.Export(
workspace, model.net, model.params
)
if args.debug:
print("init_net:")
for op in init_net.op:
print(" ", op.type, op.input, "-->", op.output)
print("predict_net:")
for op in predict_net.op:
print(" ", op.type, op.input, "-->", op.output)
with open(args.predict_net, 'wb') as f:
f.write(predict_net.SerializeToString())
with open(args.init_net, 'wb') as f:
f.write(init_net.SerializeToString())
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Utility to generate Caffe2 benchmark models.")
parser.add_argument("operator", help="Caffe2 operator to benchmark.")
parser.add_argument("-b", "--blob",
help="Instantiate a blob --blob name=dim1,dim2,dim3",
action='append')
parser.add_argument("--context", help="Context to run on.", default="CPU")
parser.add_argument("--kwargs", help="kwargs to pass to operator.",
nargs="*", type=parse_kwarg, default=[])
parser.add_argument("--init_net", help="Output initialization net.",
default="init_net.pb")
parser.add_argument("--predict_net", help="Output prediction net.",
default="predict_net.pb")
parser.add_argument("--benchmark_name",
help="Name of the benchmark network",
default="benchmark")
parser.add_argument("--input_name", help="Name of the input blob.",
default="data")
parser.add_argument("--output_name", help="Name of the output blob.",
default="output")
parser.add_argument("--instances",
help="Number of instances to run the operator.",
default="1")
parser.add_argument("-d", "--debug", help="Print debug information.",
action='store_true')
parser.add_argument("-c", "--chain",
help="Chain ops together (create data dependencies)",
action='store_true')
args = parser.parse_args()
main(args)
| pytorch-master | binaries/bench_gen/bench_gen.py |
"""Scribe Uploader for Pytorch Benchmark Data
Currently supports data in pytest-benchmark format but can be extended.
New fields can be added just by modifying the schema in this file; schema
checking is only here to encourage reusing existing fields and to avoid typos.
"""
import argparse
import time
import json
import os
import requests
import subprocess
from collections import defaultdict
class ScribeUploader:
def __init__(self, category):
self.category = category
def format_message(self, field_dict):
assert 'time' in field_dict, "Missing required Scribe field 'time'"
message = defaultdict(dict)
for field, value in field_dict.items():
if field in self.schema['normal']:
message['normal'][field] = str(value)
elif field in self.schema['int']:
message['int'][field] = int(value)
elif field in self.schema['float']:
message['float'][field] = float(value)
else:
raise ValueError("Field {} is not currently used, "
"be intentional about adding new fields".format(field))
return message
def _upload_intern(self, messages):
for m in messages:
json_str = json.dumps(m)
cmd = ['scribe_cat', self.category, json_str]
subprocess.run(cmd)
def upload(self, messages):
if os.environ.get('SCRIBE_INTERN'):
return self._upload_intern(messages)
access_token = os.environ.get("SCRIBE_GRAPHQL_ACCESS_TOKEN")
if not access_token:
raise ValueError("Can't find access token from environment variable")
url = "https://graph.facebook.com/scribe_logs"
r = requests.post(
url,
data={
"access_token": access_token,
"logs": json.dumps(
[
{
"category": self.category,
"message": json.dumps(message),
"line_escape": False,
}
for message in messages
]
),
},
)
print(r.text)
r.raise_for_status()
class PytorchBenchmarkUploader(ScribeUploader):
def __init__(self):
super().__init__('perfpipe_pytorch_benchmarks')
self.schema = {
'int': [
'time', 'rounds',
],
'normal': [
'benchmark_group', 'benchmark_name', 'benchmark_executor',
'benchmark_fuser', 'benchmark_class', 'benchmark_time',
'pytorch_commit_id', 'pytorch_branch', 'pytorch_commit_time', 'pytorch_version',
'pytorch_git_dirty',
'machine_kernel', 'machine_processor', 'machine_hostname',
'circle_build_num', 'circle_project_reponame',
],
'float': [
'stddev', 'min', 'median', 'max', 'mean',
]
}
def post_pytest_benchmarks(self, pytest_json):
machine_info = pytest_json['machine_info']
commit_info = pytest_json['commit_info']
upload_time = int(time.time())
messages = []
for b in pytest_json['benchmarks']:
test = b['name'].split('[')[0]
net_name = b['params']['net_name']
benchmark_name = '{}[{}]'.format(test, net_name)
executor = b['params']['executor']
fuser = b['params']['fuser']
m = self.format_message({
"time": upload_time,
"benchmark_group": b['group'],
"benchmark_name": benchmark_name,
"benchmark_executor": executor,
"benchmark_fuser": fuser,
"benchmark_class": b['fullname'],
"benchmark_time": pytest_json['datetime'],
"pytorch_commit_id": commit_info['id'],
"pytorch_branch": commit_info['branch'],
"pytorch_commit_time": commit_info['time'],
"pytorch_version": None,
"pytorch_git_dirty": commit_info['dirty'],
"machine_kernel": machine_info['release'],
"machine_processor": machine_info['processor'],
"machine_hostname": machine_info['node'],
"circle_build_num": os.environ.get("CIRCLE_BUILD_NUM"),
"circle_project_reponame": os.environ.get("CIRCLE_PROJECT_REPONAME"),
"stddev": b['stats']['stddev'],
"rounds": b['stats']['rounds'],
"min": b['stats']['min'],
"median": b['stats']['median'],
"max": b['stats']['max'],
"mean": b['stats']['mean'],
})
messages.append(m)
self.upload(messages)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--pytest_bench_json", type=argparse.FileType('r'),
help='Upload json data formatted by pytest-benchmark module')
args = parser.parse_args()
if args.pytest_bench_json:
benchmark_uploader = PytorchBenchmarkUploader()
json_data = json.load(args.pytest_bench_json)
benchmark_uploader.post_pytest_benchmarks(json_data)
| pytorch-master | benchmarks/upload_scribe.py |
import argparse
import json
from collections import namedtuple
Result = namedtuple("Result", ["name", "base_time", "diff_time"])
def construct_name(fwd_bwd, test_name):
bwd = 'backward' in fwd_bwd
suite_name = fwd_bwd.replace('-backward', '')
return '{suite}[{test}]:{fwd_bwd}'.format(suite=suite_name, test=test_name, fwd_bwd='bwd' if bwd else 'fwd')
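# Worked example: construct_name('jit-backward', 'lstm') yields 'jit[lstm]:bwd',
# while construct_name('jit', 'lstm') yields 'jit[lstm]:fwd'.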
def get_times(json_data):
r = {}
for fwd_bwd in json_data:
for test_name in json_data[fwd_bwd]:
name = construct_name(fwd_bwd, test_name)
r[name] = json_data[fwd_bwd][test_name]
return r
parser = argparse.ArgumentParser("compare two pytest jsons")
parser.add_argument('base', help="base json file")
parser.add_argument('diff', help='diff json file')
parser.add_argument('--format', default='md', type=str, help='output format (csv, md, json, table)')
args = parser.parse_args()
with open(args.base, "r") as base:
base_times = get_times(json.load(base))
with open(args.diff, "r") as diff:
diff_times = get_times(json.load(diff))
all_keys = set(base_times.keys()).union(diff_times.keys())
results = [
Result(name, base_times.get(name, float("nan")), diff_times.get(name, float("nan")))
for name in sorted(all_keys)
]
header_fmt = {'table' : '{:48s} {:>13s} {:>15s} {:>10s}',
'md' : '| {:48s} | {:>13s} | {:>15s} | {:>10s} |',
'csv' : '{:s}, {:s}, {:s}, {:s}'}
data_fmt = {'table' : '{:48s} {:13.6f} {:15.6f} {:9.1f}%',
'md' : '| {:48s} | {:13.6f} | {:15.6f} | {:9.1f}% |',
'csv' : '{:s}, {:.6f}, {:.6f}, {:.2f}%'}
if args.format in ['table', 'md', 'csv']:
header_fmt_str = header_fmt[args.format]
data_fmt_str = data_fmt[args.format]
print(header_fmt_str.format("name", "base time (s)", "diff time (s)", "% change"))
if args.format == 'md':
print(header_fmt_str.format(":---", "---:", "---:", "---:"))
for r in results:
print(data_fmt_str.format(r.name, r.base_time, r.diff_time, (r.diff_time / r.base_time - 1.0) * 100.0))
elif args.format == 'json':
print(json.dumps(results))
else:
raise ValueError('Unknown output format: ' + args.format)
| pytorch-master | benchmarks/compare-fastrnn-results.py |
import torch
import argparse
from common import SubTensor, WithTorchFunction, SubWithTorchFunction # noqa: F401
Tensor = torch.tensor
NUM_REPEATS = 1000000
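# This script only runs torch.add in a tight loop and does no timing itself; judging
# by the file name it is presumably meant to be observed from an external sampling
# profiler, e.g. something like `py-spy top -- python pyspybench.py SubTensor`
# (hypothetical invocation).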
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run the torch.add for a given class a given number of times."
)
parser.add_argument(
"tensor_class", metavar="TensorClass", type=str, help="The class to benchmark."
)
parser.add_argument(
"--nreps", "-n", type=int, default=NUM_REPEATS, help="The number of repeats."
)
args = parser.parse_args()
TensorClass = globals()[args.tensor_class]
NUM_REPEATS = args.nreps
t1 = TensorClass([1.])
t2 = TensorClass([2.])
for _ in range(NUM_REPEATS):
torch.add(t1, t2)
| pytorch-master | benchmarks/overrides_benchmark/pyspybench.py |
import torch
import time
import argparse
from common import SubTensor, WithTorchFunction, SubWithTorchFunction
NUM_REPEATS = 1000
NUM_REPEAT_OF_REPEATS = 1000
def bench(t1, t2):
bench_times = []
for _ in range(NUM_REPEAT_OF_REPEATS):
time_start = time.time()
for _ in range(NUM_REPEATS):
torch.add(t1, t2)
bench_times.append(time.time() - time_start)
    bench_time = float(torch.min(torch.tensor(bench_times))) / NUM_REPEATS
    bench_std = float(torch.std(torch.tensor(bench_times))) / NUM_REPEATS
return bench_time, bench_std
def main():
global NUM_REPEATS
global NUM_REPEAT_OF_REPEATS
parser = argparse.ArgumentParser(
description="Run the __torch_function__ benchmarks."
)
parser.add_argument(
"--nreps",
"-n",
type=int,
default=NUM_REPEATS,
help="The number of repeats for one measurement.",
)
parser.add_argument(
"--nrepreps",
"-m",
type=int,
default=NUM_REPEAT_OF_REPEATS,
help="The number of measurements.",
)
args = parser.parse_args()
NUM_REPEATS = args.nreps
NUM_REPEAT_OF_REPEATS = args.nrepreps
types = torch.tensor, SubTensor, WithTorchFunction, SubWithTorchFunction
for t in types:
tensor_1 = t([1.])
tensor_2 = t([2.])
bench_min, bench_std = bench(tensor_1, tensor_2)
print(
"Type {0} had a minimum time of {1} us"
" and a standard deviation of {2} us.".format(
t.__name__, (10 ** 6 * bench_min), (10 ** 6) * bench_std
)
)
if __name__ == "__main__":
main()
| pytorch-master | benchmarks/overrides_benchmark/bench.py |
import torch
NUM_REPEATS = 1000
NUM_REPEAT_OF_REPEATS = 1000
class SubTensor(torch.Tensor):
pass
class WithTorchFunction:
def __init__(self, data, requires_grad=False):
if isinstance(data, torch.Tensor):
self._tensor = data
return
self._tensor = torch.tensor(data, requires_grad=requires_grad)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return WithTorchFunction(args[0]._tensor + args[1]._tensor)
class SubWithTorchFunction(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return super().__torch_function__(func, types, args, kwargs)
| pytorch-master | benchmarks/overrides_benchmark/common.py |
from caffe2.python import workspace, core
import numpy as np
from utils import NUM_LOOP_ITERS
workspace.GlobalInit(['caffe2'])
def add_blob(ws, blob_name, tensor_size):
blob_tensor = np.random.randn(*tensor_size).astype(np.float32)
ws.FeedBlob(blob_name, blob_tensor)
class C2SimpleNet(object):
"""
    This module constructs a net with the 'op_name' operator. The net consists
    of a series of such operators.
    It initializes the workspace with as many input blobs as the number of
    parameters needed for the op.
    Provides a forward method to run the net niters times.
"""
def __init__(self, op_name, num_inputs=1, debug=False):
self.input_names = []
self.net = core.Net("framework_benchmark_net")
self.input_names = ["in_{}".format(i) for i in range(num_inputs)]
for i in range(num_inputs):
add_blob(workspace, self.input_names[i], [1])
self.net.AddExternalInputs(self.input_names)
op_constructor = getattr(self.net, op_name)
op_constructor(self.input_names)
self.output_name = self.net._net.op[-1].output
print("Benchmarking op {}:".format(op_name))
for _ in range(NUM_LOOP_ITERS):
output_name = self.net._net.op[-1].output
self.input_names[-1] = output_name[0]
assert len(self.input_names) == num_inputs
op_constructor(self.input_names)
workspace.CreateNet(self.net)
if debug:
print(self.net._net)
def forward(self, niters):
workspace.RunNet(self.net, niters, False)
| pytorch-master | benchmarks/framework_overhead_benchmark/C2Module.py |
import time
from collections import namedtuple
from torch.utils import ThroughputBenchmark
NUM_LOOP_ITERS = 1000
BenchmarkConfig = namedtuple('BenchmarkConfig', 'num_warmup_iters num_iters')
ModuleConfig = namedtuple('ModuleConfig', 'pt_fn c2_op num_params graph_mode')
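# ModuleConfig fields, as used by framework_overhead_benchmark.py:
#   pt_fn      - PyTorch callable wrapped by the module (e.g. add_tensors_loop), or None for the C2 path
#   c2_op      - Caffe2 operator name (e.g. 'Sum'), or None for the PyTorch path
#   num_params - number of input tensors the op takes
#   graph_mode - whether the wrapped module is JIT-traced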
def ms_to_us(time_ms):
return (time_ms * 1e3)
def secs_to_us(time_s):
return (time_s * 1e6)
def secs_to_ms(time_s):
return (time_s * 1e3)
def benchmark_using_throughput_benchmark(config, module):
print("Benchmarking via ThroughputBenchmark")
bench = ThroughputBenchmark(module.module)
bench.add_input(*module.tensor_inputs)
stats = bench.benchmark(1, config.num_warmup_iters, config.num_iters)
return stats.latency_avg_ms / NUM_LOOP_ITERS
def benchmark_module(config, module, use_throughput_benchmark=False):
if use_throughput_benchmark:
return benchmark_using_throughput_benchmark(config, module)
module.forward(config.num_warmup_iters)
print("Running module for {} iterations".format(config.num_iters))
start = time.time()
module.forward(config.num_iters)
end = time.time()
time_elapsed_s = (end - start)
return (secs_to_ms(time_elapsed_s) / config.num_iters / NUM_LOOP_ITERS)
| pytorch-master | benchmarks/framework_overhead_benchmark/utils.py |
import torch
from utils import NUM_LOOP_ITERS
def add_tensors_loop(x, y):
z = torch.add(x, y)
for i in range(NUM_LOOP_ITERS):
z = torch.add(z, x)
return z
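# Each call to add_tensors_loop issues NUM_LOOP_ITERS + 1 torch.add calls;
# benchmark_module in utils.py divides the measured per-iteration time by
# NUM_LOOP_ITERS, so the reported number is roughly the latency of a single add.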
class SimpleAddModule(torch.nn.Module):
def __init__(self, add_op):
super(SimpleAddModule, self).__init__()
self.add_op = add_op
def forward(self, x, y):
return self.add_op(x, y)
| pytorch-master | benchmarks/framework_overhead_benchmark/SimpleAddModule.py |
from utils import ms_to_us, benchmark_module, BenchmarkConfig, ModuleConfig
import argparse
from C2Module import C2SimpleNet
from SimpleAddModule import SimpleAddModule, add_tensors_loop
from pt_wrapper_module import WrapperModule
""" Framework overhead benchmark script.
Benchmark framework overhead.
Currently supported ops: add.
As of now runs only forward pass.
Supports both graph mode and eager mode. In graph mode the module is traced via JIT tracing.
Debug option prints the traced graph if graph_mode is enabled.
Graph can be saved via the save option; it is saved in the directory where the benchmark is run.
Example build/run:
To run PT benchmark:
buck run @mode/opt <path-to-framework_overhead_benchmark>:framework_overhead_benchmark --
--op add_op (runs in graph mode, which is the default)
buck run @mode/opt <path-to-framework_overhead_benchmark>:framework_overhead_benchmark --
--op add_op --eager_mode (runs in eager mode instead)
To run C2 benchmark:
buck run @mode/opt <path-to-framework_overhead_benchmark>:framework_overhead_benchmark --
--op add_op --benchmark_c2_net
"""
SUPPORTED_OPS = {"add_op"}
def parse_op_args(op):
    op_list = op.split(",")
    return op_list
def print_results(result):
print("===================================")
for key, value in result.items():
print("{}, latency per iter (us):{}".format(key, ms_to_us(value)))
print("===================================")
def benchmark_simple_fn(args, config, module_config, module_type, result):
""" Benchmarks a PyTorch traceable function specified in the config.
Instantiates a wrapper object that wraps the object of module_type and runs the forward
method using benchmark_module.
Args:
config: contains number of warmup and benchmark iterations.
module_config: module_config which contains op, number of parameters that op takes
and whether graph mode is enabled or not.
module_type: Type of the module to be wrapped. e.g. SimpleAddModule for add op.
result: dictionary instance to be populated with the benchmark result (latency per iter).
"""
benchmark_c2_net = args.benchmark_c2_net
print("Benchmarking {}".format(module_type.__name__))
if benchmark_c2_net:
op_name = module_config.c2_op
num_inputs = module_config.num_params
module = C2SimpleNet(op_name, num_inputs=num_inputs, debug=args.debug)
latency_per_iter_ms = benchmark_module(config, module)
result[op_name] = latency_per_iter_ms
else:
f_name = module_config.pt_fn.__name__ + ":Num Operands=" + str(module_config.num_params)
graph_mode_str = "Graph mode" + ":" + str(module_config.graph_mode)
result_key = ','.join((f_name, graph_mode_str))
module = WrapperModule(module_type, module_config, args.debug, args.save)
latency_per_iter_ms = benchmark_module(config, module, args.use_throughput_benchmark)
result[result_key] = latency_per_iter_ms
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--op", default="add_op", dest="op", type=str)
parser.add_argument("--benchmark_c2_net", default=False, dest="benchmark_c2_net", action="store_true")
parser.add_argument("--use_throughput_benchmark", default=False, dest="use_throughput_benchmark", action="store_true")
parser.add_argument("--debug", default=False, dest="debug", action="store_true")
parser.add_argument("--save", default=False, dest="save", action="store_true")
parser.add_argument("--eager_mode", default=False, dest="eager_mode", action="store_true")
parser.add_argument("--num_warmup_iters", type=int, default=100)
parser.add_argument("--num_iters", type=int, default=1000)
args = parser.parse_args()
if args.op not in SUPPORTED_OPS:
print("Op {} is not supported: Supported ops are:{}".format(args.op, SUPPORTED_OPS))
return
assert not (args.benchmark_c2_net and args.use_throughput_benchmark), \
"Benchmarking of C2 net via throughput benchmarking is not yet supported"
num_warmup_iters = args.num_warmup_iters
num_iters = args.num_iters
config = BenchmarkConfig(num_warmup_iters, num_iters)
graph_mode = True
if args.eager_mode:
graph_mode = False
result = {}
if args.op == "add_op":
num_params = 2
if args.benchmark_c2_net:
module_config = ModuleConfig(None, 'Sum', num_params, None)
else:
module_config = ModuleConfig(add_tensors_loop, None, num_params, graph_mode)
benchmark_simple_fn(args, config, module_config, SimpleAddModule, result)
print_results(result)
if __name__ == "__main__":
main()
| pytorch-master | benchmarks/framework_overhead_benchmark/framework_overhead_benchmark.py |
import torch
class WrapperModule(object):
""" Wraps the instance of wrapped_type.
    In graph mode, traces the instance of wrapped_type.
    Randomly initializes num_params tensors, each with a single float element.
Args:
wrapped_type:
- Object type to be wrapped.
Expects the wrapped_type to:
- be constructed with pt_fn specified in module_config.
- provide forward method that takes module_config.num_params args.
module_config:
- Specified pt_fn to construct wrapped_type with, whether graph_mode
is enabled, and number of parameters wrapped_type's forward method
takes.
debug:
- Whether debug mode is enabled.
save:
- In graph mode, whether graph is to be saved.
"""
def __init__(self, wrapped_type, module_config, debug, save=False):
pt_fn = module_config.pt_fn
self.module = wrapped_type(pt_fn)
self.tensor_inputs = []
self.module_name = wrapped_type.__name__
for _ in range(module_config.num_params):
self.tensor_inputs.append(torch.randn(1))
if module_config.graph_mode:
self.module = torch.jit.trace(self.module, self.tensor_inputs)
if save:
file_name = self.module_name + "_" + pt_fn.__name__ + ".pt"
torch.jit.save(self.module, file_name)
print("Generated graph is saved in {}".format(file_name))
print("Benchmarking module {} with fn {}: Graph mode:{}".format(self.module_name, pt_fn.__name__, module_config.graph_mode))
if (debug and isinstance(self.module, torch.jit.ScriptModule)):
print(self.module.graph)
print(self.module.code)
def forward(self, niters):
with torch.no_grad():
for _ in range(niters):
self.module.forward(*self.tensor_inputs)
| pytorch-master | benchmarks/framework_overhead_benchmark/pt_wrapper_module.py |
import pytest
import torch
from .fuser import set_fuser
from .runner import get_nn_runners
@pytest.fixture(scope='class')
def modeldef(request, net_name, executor, fuser):
set_fuser(fuser, executor)
# Given a 'net_name' provided by generate_tests, build the thing
name, rnn_creator, context = get_nn_runners(net_name)[0]
    creator_args = {
'seqLength': 100, 'numLayers': 1,
'inputSize': 512, 'hiddenSize': 512,
'miniBatch': 64, 'device': 'cuda', 'seed': None
}
return rnn_creator(**creator_args)
def cuda_sync(func, *args, **kwargs):
out = func(*args, **kwargs)
torch.cuda.synchronize()
return out
@pytest.mark.benchmark(
warmup=True,
warmup_iterations=3,
disable_gc=True,
max_time=0.1,
group="fastrnns",
)
class TestBenchNetwork:
# See 'modeldef' fixture, which provides the things to benchmark
def test_forward(self, modeldef, benchmark):
forward_output = benchmark(cuda_sync, modeldef.forward, *modeldef.inputs)
def test_backward(self, modeldef, benchmark):
backward_input = modeldef.forward(*modeldef.inputs)
if modeldef.backward_setup is not None:
backward_input = modeldef.backward_setup(backward_input)
if modeldef.backward is not None:
benchmark(cuda_sync, modeldef.backward, *backward_input, retain_graph=True)
with torch.no_grad():
for param in modeldef.params:
assert param.grad is not None
param.grad.zero_()
| pytorch-master | benchmarks/fastrnns/test_bench.py |
import pytest # noqa: F401
default_rnns = ['cudnn', 'aten', 'jit', 'jit_premul', 'jit_premul_bias', 'jit_simple',
'jit_multilayer', 'py']
default_cnns = ['resnet18', 'resnet18_jit', 'resnet50', 'resnet50_jit']
all_nets = default_rnns + default_cnns
def pytest_generate_tests(metafunc):
# This creates lists of tests to generate, can be customized
if metafunc.cls.__name__ == "TestBenchNetwork":
metafunc.parametrize('net_name', all_nets, scope="class")
metafunc.parametrize("executor", [metafunc.config.getoption("executor")], scope="class")
metafunc.parametrize("fuser", [metafunc.config.getoption("fuser")], scope="class")
def pytest_addoption(parser):
parser.addoption("--fuser", default="old", help="fuser to use for benchmarks")
parser.addoption("--executor", default="legacy", help="executor to use for benchmarks")
| pytorch-master | benchmarks/fastrnns/conftest.py |
from collections import namedtuple
from functools import partial
import torch
import torchvision.models as cnn
from .factory import (dropoutlstm_creator, imagenet_cnn_creator,
layernorm_pytorch_lstm_creator, lnlstm_creator,
lstm_creator, lstm_multilayer_creator,
lstm_premul_bias_creator, lstm_premul_creator,
lstm_simple_creator, pytorch_lstm_creator,
varlen_lstm_creator, varlen_pytorch_lstm_creator)
class DisableCuDNN():
def __enter__(self):
self.saved = torch.backends.cudnn.enabled
torch.backends.cudnn.enabled = False
def __exit__(self, *args, **kwargs):
torch.backends.cudnn.enabled = self.saved
class DummyContext():
def __enter__(self):
pass
def __exit__(self, *args, **kwargs):
pass
class AssertNoJIT():
def __enter__(self):
import os
enabled = os.environ.get('PYTORCH_JIT', 1)
assert not enabled
def __exit__(self, *args, **kwargs):
pass
RNNRunner = namedtuple('RNNRunner', [
'name', 'creator', 'context',
])
def get_nn_runners(*names):
return [nn_runners[name] for name in names]
nn_runners = {
'cudnn': RNNRunner('cudnn', pytorch_lstm_creator, DummyContext),
'cudnn_dropout': RNNRunner('cudnn_dropout', partial(pytorch_lstm_creator, dropout=0.4), DummyContext),
'cudnn_layernorm': RNNRunner('cudnn_layernorm', layernorm_pytorch_lstm_creator, DummyContext),
'vl_cudnn': RNNRunner('vl_cudnn', varlen_pytorch_lstm_creator, DummyContext),
'vl_jit': RNNRunner('vl_jit', partial(varlen_lstm_creator, script=True), DummyContext),
'vl_py': RNNRunner('vl_py', varlen_lstm_creator, DummyContext),
'aten': RNNRunner('aten', pytorch_lstm_creator, DisableCuDNN),
'jit': RNNRunner('jit', lstm_creator, DummyContext),
'jit_premul': RNNRunner('jit_premul', lstm_premul_creator, DummyContext),
'jit_premul_bias': RNNRunner('jit_premul_bias', lstm_premul_bias_creator, DummyContext),
'jit_simple': RNNRunner('jit_simple', lstm_simple_creator, DummyContext),
'jit_multilayer': RNNRunner('jit_multilayer', lstm_multilayer_creator, DummyContext),
'jit_layernorm': RNNRunner('jit_layernorm', lnlstm_creator, DummyContext),
'jit_layernorm_decom': RNNRunner('jit_layernorm_decom',
partial(lnlstm_creator, decompose_layernorm=True),
DummyContext),
'jit_dropout': RNNRunner('jit_dropout', dropoutlstm_creator, DummyContext),
'py': RNNRunner('py', partial(lstm_creator, script=False), DummyContext),
'resnet18': RNNRunner('resnet18', imagenet_cnn_creator(cnn.resnet18, jit=False), DummyContext),
'resnet18_jit': RNNRunner('resnet18_jit', imagenet_cnn_creator(cnn.resnet18), DummyContext),
'resnet50': RNNRunner('resnet50', imagenet_cnn_creator(cnn.resnet50, jit=False), DummyContext),
'resnet50_jit': RNNRunner('resnet50_jit', imagenet_cnn_creator(cnn.resnet50), DummyContext),
}
| pytorch-master | benchmarks/fastrnns/runner.py |
import torch
def set_fuser(fuser_name, executor_name):
assert fuser_name in ['te', 'old', 'none', 'default']
if fuser_name == 'te':
torch._C._jit_set_profiling_executor(True)
torch._C._get_graph_executor_optimize(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
torch._C._jit_set_profiling_executor(False)
torch._C._get_graph_executor_optimize(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
elif fuser_name == 'none':
torch._C._jit_set_profiling_executor(False)
torch._C._get_graph_executor_optimize(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
elif fuser_name == 'default':
pass
# --executor overrides settings of --fuser
if executor_name == 'profiling':
torch._C._jit_set_profiling_executor(True)
torch._C._get_graph_executor_optimize(True)
elif executor_name == 'simple':
torch._C._get_graph_executor_optimize(False)
elif executor_name == 'legacy':
torch._C._jit_set_profiling_executor(False)
torch._C._get_graph_executor_optimize(True)
elif executor_name == 'default':
pass
| pytorch-master | benchmarks/fastrnns/fuser.py |
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.jit as jit
import warnings
from collections import namedtuple
from typing import List, Tuple
from torch import Tensor
import numbers
'''
Some helper classes for writing custom TorchScript LSTMs.
Goals:
- Classes are easy to read, use, and extend
- Performance of custom LSTMs approaches fused-kernel levels of speed.
A few notes about features we could add to clean up the below code:
- Support enumerate with nn.ModuleList:
https://github.com/pytorch/pytorch/issues/14471
- Support enumerate/zip with lists:
https://github.com/pytorch/pytorch/issues/15952
- Support overriding of class methods:
https://github.com/pytorch/pytorch/issues/10733
- Support passing around user-defined namedtuple types for readability
- Support slicing w/ range. It enables reversing lists easily.
https://github.com/pytorch/pytorch/issues/10774
- Multiline type annotations. List[List[Tuple[Tensor,Tensor]]] is verbose
https://github.com/pytorch/pytorch/pull/14922
'''
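# Quick usage sketch (shapes follow the smoke tests at the bottom of this file):
#     rnn = script_lstm(input_size=3, hidden_size=7, num_layers=4)
#     inp = torch.randn(5, 2, 3)  # (seq_len, batch, input_size)
#     states = [LSTMState(torch.randn(2, 7), torch.randn(2, 7)) for _ in range(4)]
#     out, out_states = rnn(inp, states)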
def script_lstm(input_size, hidden_size, num_layers, bias=True,
batch_first=False, dropout=False, bidirectional=False):
'''Returns a ScriptModule that mimics a PyTorch native LSTM.'''
# The following are not implemented.
assert bias
assert not batch_first
if bidirectional:
stack_type = StackedLSTM2
layer_type = BidirLSTMLayer
dirs = 2
elif dropout:
stack_type = StackedLSTMWithDropout
layer_type = LSTMLayer
dirs = 1
else:
stack_type = StackedLSTM
layer_type = LSTMLayer
dirs = 1
return stack_type(num_layers, layer_type,
first_layer_args=[LSTMCell, input_size, hidden_size],
other_layer_args=[LSTMCell, hidden_size * dirs,
hidden_size])
def script_lnlstm(input_size, hidden_size, num_layers, bias=True,
batch_first=False, dropout=False, bidirectional=False,
decompose_layernorm=False):
'''Returns a ScriptModule that mimics a PyTorch native LSTM.'''
# The following are not implemented.
assert bias
assert not batch_first
assert not dropout
if bidirectional:
stack_type = StackedLSTM2
layer_type = BidirLSTMLayer
dirs = 2
else:
stack_type = StackedLSTM
layer_type = LSTMLayer
dirs = 1
return stack_type(num_layers, layer_type,
first_layer_args=[LayerNormLSTMCell, input_size, hidden_size,
decompose_layernorm],
other_layer_args=[LayerNormLSTMCell, hidden_size * dirs,
hidden_size, decompose_layernorm])
LSTMState = namedtuple('LSTMState', ['hx', 'cx'])
def reverse(lst: List[Tensor]) -> List[Tensor]:
return lst[::-1]
class LSTMCell(jit.ScriptModule):
def __init__(self, input_size, hidden_size):
super(LSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.weight_ih = Parameter(torch.randn(4 * hidden_size, input_size))
self.weight_hh = Parameter(torch.randn(4 * hidden_size, hidden_size))
self.bias_ih = Parameter(torch.randn(4 * hidden_size))
self.bias_hh = Parameter(torch.randn(4 * hidden_size))
@jit.script_method
def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
hx, cx = state
gates = (torch.mm(input, self.weight_ih.t()) + self.bias_ih +
torch.mm(hx, self.weight_hh.t()) + self.bias_hh)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, (hy, cy)
class LayerNorm(jit.ScriptModule):
def __init__(self, normalized_shape):
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
normalized_shape = torch.Size(normalized_shape)
# XXX: This is true for our LSTM / NLP use case and helps simplify code
assert len(normalized_shape) == 1
self.weight = Parameter(torch.ones(normalized_shape))
self.bias = Parameter(torch.zeros(normalized_shape))
self.normalized_shape = normalized_shape
@jit.script_method
def compute_layernorm_stats(self, input):
mu = input.mean(-1, keepdim=True)
sigma = input.std(-1, keepdim=True, unbiased=False)
return mu, sigma
@jit.script_method
def forward(self, input):
mu, sigma = self.compute_layernorm_stats(input)
return (input - mu) / sigma * self.weight + self.bias
class LayerNormLSTMCell(jit.ScriptModule):
def __init__(self, input_size, hidden_size, decompose_layernorm=False):
super(LayerNormLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.weight_ih = Parameter(torch.randn(4 * hidden_size, input_size))
self.weight_hh = Parameter(torch.randn(4 * hidden_size, hidden_size))
# The layernorms provide learnable biases
if decompose_layernorm:
ln = LayerNorm
else:
ln = nn.LayerNorm
self.layernorm_i = ln(4 * hidden_size)
self.layernorm_h = ln(4 * hidden_size)
self.layernorm_c = ln(hidden_size)
@jit.script_method
def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
hx, cx = state
igates = self.layernorm_i(torch.mm(input, self.weight_ih.t()))
hgates = self.layernorm_h(torch.mm(hx, self.weight_hh.t()))
gates = igates + hgates
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = self.layernorm_c((forgetgate * cx) + (ingate * cellgate))
hy = outgate * torch.tanh(cy)
return hy, (hy, cy)
class LSTMLayer(jit.ScriptModule):
def __init__(self, cell, *cell_args):
super(LSTMLayer, self).__init__()
self.cell = cell(*cell_args)
@jit.script_method
def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
inputs = input.unbind(0)
outputs = torch.jit.annotate(List[Tensor], [])
for i in range(len(inputs)):
out, state = self.cell(inputs[i], state)
outputs += [out]
return torch.stack(outputs), state
class ReverseLSTMLayer(jit.ScriptModule):
def __init__(self, cell, *cell_args):
super(ReverseLSTMLayer, self).__init__()
self.cell = cell(*cell_args)
@jit.script_method
def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
inputs = reverse(input.unbind(0))
outputs = jit.annotate(List[Tensor], [])
for i in range(len(inputs)):
out, state = self.cell(inputs[i], state)
outputs += [out]
return torch.stack(reverse(outputs)), state
class BidirLSTMLayer(jit.ScriptModule):
__constants__ = ['directions']
def __init__(self, cell, *cell_args):
super(BidirLSTMLayer, self).__init__()
self.directions = nn.ModuleList([
LSTMLayer(cell, *cell_args),
ReverseLSTMLayer(cell, *cell_args),
])
@jit.script_method
def forward(self, input: Tensor, states: List[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, List[Tuple[Tensor, Tensor]]]:
# List[LSTMState]: [forward LSTMState, backward LSTMState]
outputs = jit.annotate(List[Tensor], [])
output_states = jit.annotate(List[Tuple[Tensor, Tensor]], [])
# XXX: enumerate https://github.com/pytorch/pytorch/issues/14471
i = 0
for direction in self.directions:
state = states[i]
out, out_state = direction(input, state)
outputs += [out]
output_states += [out_state]
i += 1
return torch.cat(outputs, -1), output_states
def init_stacked_lstm(num_layers, layer, first_layer_args, other_layer_args):
layers = [layer(*first_layer_args)] + [layer(*other_layer_args)
for _ in range(num_layers - 1)]
return nn.ModuleList(layers)
class StackedLSTM(jit.ScriptModule):
__constants__ = ['layers'] # Necessary for iterating through self.layers
def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
super(StackedLSTM, self).__init__()
self.layers = init_stacked_lstm(num_layers, layer, first_layer_args,
other_layer_args)
@jit.script_method
def forward(self, input: Tensor, states: List[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, List[Tuple[Tensor, Tensor]]]:
# List[LSTMState]: One state per layer
output_states = jit.annotate(List[Tuple[Tensor, Tensor]], [])
output = input
# XXX: enumerate https://github.com/pytorch/pytorch/issues/14471
i = 0
for rnn_layer in self.layers:
state = states[i]
output, out_state = rnn_layer(output, state)
output_states += [out_state]
i += 1
return output, output_states
# Differs from StackedLSTM in that its forward method takes
# List[List[Tuple[Tensor,Tensor]]]. It would be nice to subclass StackedLSTM
# except we don't support overriding script methods.
# https://github.com/pytorch/pytorch/issues/10733
class StackedLSTM2(jit.ScriptModule):
__constants__ = ['layers'] # Necessary for iterating through self.layers
def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
super(StackedLSTM2, self).__init__()
self.layers = init_stacked_lstm(num_layers, layer, first_layer_args,
other_layer_args)
@jit.script_method
def forward(self, input: Tensor, states: List[List[Tuple[Tensor, Tensor]]]) -> Tuple[Tensor, List[List[Tuple[Tensor, Tensor]]]]:
# List[List[LSTMState]]: The outer list is for layers,
# inner list is for directions.
output_states = jit.annotate(List[List[Tuple[Tensor, Tensor]]], [])
output = input
# XXX: enumerate https://github.com/pytorch/pytorch/issues/14471
i = 0
for rnn_layer in self.layers:
state = states[i]
output, out_state = rnn_layer(output, state)
output_states += [out_state]
i += 1
return output, output_states
class StackedLSTMWithDropout(jit.ScriptModule):
# Necessary for iterating through self.layers and dropout support
__constants__ = ['layers', 'num_layers']
def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
super(StackedLSTMWithDropout, self).__init__()
self.layers = init_stacked_lstm(num_layers, layer, first_layer_args,
other_layer_args)
# Introduces a Dropout layer on the outputs of each LSTM layer except
# the last layer, with dropout probability = 0.4.
self.num_layers = num_layers
if (num_layers == 1):
warnings.warn("dropout lstm adds dropout layers after all but last "
"recurrent layer, it expects num_layers greater than "
"1, but got num_layers = 1")
self.dropout_layer = nn.Dropout(0.4)
@jit.script_method
def forward(self, input: Tensor, states: List[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, List[Tuple[Tensor, Tensor]]]:
# List[LSTMState]: One state per layer
output_states = jit.annotate(List[Tuple[Tensor, Tensor]], [])
output = input
# XXX: enumerate https://github.com/pytorch/pytorch/issues/14471
i = 0
for rnn_layer in self.layers:
state = states[i]
output, out_state = rnn_layer(output, state)
# Apply the dropout layer except the last layer
if i < self.num_layers - 1:
output = self.dropout_layer(output)
output_states += [out_state]
i += 1
return output, output_states
def flatten_states(states):
states = list(zip(*states))
assert len(states) == 2
return [torch.stack(state) for state in states]
def double_flatten_states(states):
# XXX: Can probably write this in a nicer way
states = flatten_states([flatten_states(inner) for inner in states])
return [hidden.view([-1] + list(hidden.shape[2:])) for hidden in states]
def test_script_rnn_layer(seq_len, batch, input_size, hidden_size):
inp = torch.randn(seq_len, batch, input_size)
state = LSTMState(torch.randn(batch, hidden_size),
torch.randn(batch, hidden_size))
rnn = LSTMLayer(LSTMCell, input_size, hidden_size)
out, out_state = rnn(inp, state)
# Control: pytorch native LSTM
lstm = nn.LSTM(input_size, hidden_size, 1)
lstm_state = LSTMState(state.hx.unsqueeze(0), state.cx.unsqueeze(0))
for lstm_param, custom_param in zip(lstm.all_weights[0], rnn.parameters()):
assert lstm_param.shape == custom_param.shape
with torch.no_grad():
lstm_param.copy_(custom_param)
lstm_out, lstm_out_state = lstm(inp, lstm_state)
assert (out - lstm_out).abs().max() < 1e-5
assert (out_state[0] - lstm_out_state[0]).abs().max() < 1e-5
assert (out_state[1] - lstm_out_state[1]).abs().max() < 1e-5
def test_script_stacked_rnn(seq_len, batch, input_size, hidden_size,
num_layers):
inp = torch.randn(seq_len, batch, input_size)
states = [LSTMState(torch.randn(batch, hidden_size),
torch.randn(batch, hidden_size))
for _ in range(num_layers)]
rnn = script_lstm(input_size, hidden_size, num_layers)
out, out_state = rnn(inp, states)
custom_state = flatten_states(out_state)
# Control: pytorch native LSTM
lstm = nn.LSTM(input_size, hidden_size, num_layers)
lstm_state = flatten_states(states)
for layer in range(num_layers):
custom_params = list(rnn.parameters())[4 * layer: 4 * (layer + 1)]
for lstm_param, custom_param in zip(lstm.all_weights[layer],
custom_params):
assert lstm_param.shape == custom_param.shape
with torch.no_grad():
lstm_param.copy_(custom_param)
lstm_out, lstm_out_state = lstm(inp, lstm_state)
assert (out - lstm_out).abs().max() < 1e-5
assert (custom_state[0] - lstm_out_state[0]).abs().max() < 1e-5
assert (custom_state[1] - lstm_out_state[1]).abs().max() < 1e-5
def test_script_stacked_bidir_rnn(seq_len, batch, input_size, hidden_size,
num_layers):
inp = torch.randn(seq_len, batch, input_size)
states = [[LSTMState(torch.randn(batch, hidden_size),
torch.randn(batch, hidden_size))
for _ in range(2)]
for _ in range(num_layers)]
rnn = script_lstm(input_size, hidden_size, num_layers, bidirectional=True)
out, out_state = rnn(inp, states)
custom_state = double_flatten_states(out_state)
# Control: pytorch native LSTM
lstm = nn.LSTM(input_size, hidden_size, num_layers, bidirectional=True)
lstm_state = double_flatten_states(states)
for layer in range(num_layers):
for direct in range(2):
index = 2 * layer + direct
custom_params = list(rnn.parameters())[4 * index: 4 * index + 4]
for lstm_param, custom_param in zip(lstm.all_weights[index],
custom_params):
assert lstm_param.shape == custom_param.shape
with torch.no_grad():
lstm_param.copy_(custom_param)
lstm_out, lstm_out_state = lstm(inp, lstm_state)
assert (out - lstm_out).abs().max() < 1e-5
assert (custom_state[0] - lstm_out_state[0]).abs().max() < 1e-5
assert (custom_state[1] - lstm_out_state[1]).abs().max() < 1e-5
def test_script_stacked_lstm_dropout(seq_len, batch, input_size, hidden_size,
num_layers):
inp = torch.randn(seq_len, batch, input_size)
states = [LSTMState(torch.randn(batch, hidden_size),
torch.randn(batch, hidden_size))
for _ in range(num_layers)]
rnn = script_lstm(input_size, hidden_size, num_layers, dropout=True)
# just a smoke test
out, out_state = rnn(inp, states)
def test_script_stacked_lnlstm(seq_len, batch, input_size, hidden_size,
num_layers):
inp = torch.randn(seq_len, batch, input_size)
states = [LSTMState(torch.randn(batch, hidden_size),
torch.randn(batch, hidden_size))
for _ in range(num_layers)]
rnn = script_lnlstm(input_size, hidden_size, num_layers)
# just a smoke test
out, out_state = rnn(inp, states)
test_script_rnn_layer(5, 2, 3, 7)
test_script_stacked_rnn(5, 2, 3, 7, 4)
test_script_stacked_bidir_rnn(5, 2, 3, 7, 4)
test_script_stacked_lstm_dropout(5, 2, 3, 7, 4)
test_script_stacked_lnlstm(5, 2, 3, 7, 4)
| pytorch-master | benchmarks/fastrnns/custom_lstms.py |
import argparse
import subprocess
import sys
import time
import torch
import datetime
from .runner import get_nn_runners
def run_rnn(name, rnn_creator, nloops=5,
seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
miniBatch=64, device='cuda', seed=None):
def run_iter(modeldef):
# Forward
forward_output = modeldef.forward(*modeldef.inputs)
# "loss computation" and backward
if modeldef.backward_setup is not None:
backward_input = modeldef.backward_setup(forward_output)
else:
backward_input = forward_output
if modeldef.backward is not None:
modeldef.backward(*backward_input)
# "Update" parameters
if modeldef.backward is not None:
with torch.no_grad():
for param in modeldef.params:
param.grad.zero_()
torch.cuda.synchronize()
assert device == 'cuda'
creator_args = dict(seqLength=seqLength, numLayers=numLayers,
inputSize=inputSize, hiddenSize=hiddenSize,
miniBatch=miniBatch, device=device, seed=seed)
modeldef = rnn_creator(**creator_args)
[run_iter(modeldef) for _ in range(nloops)]
def profile(rnns, sleep_between_seconds=1, nloops=5,
internal_run=True, # Unused, get rid of this TODO
seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
miniBatch=64, device='cuda', seed=None):
params = dict(seqLength=seqLength, numLayers=numLayers,
inputSize=inputSize, hiddenSize=hiddenSize,
miniBatch=miniBatch, device=device, seed=seed)
for name, creator, context in get_nn_runners(*rnns):
with context():
run_rnn(name, creator, nloops, **params)
time.sleep(sleep_between_seconds)
def system(command):
"""Returns (return-code, stdout, stderr)"""
print('[system] {}'.format(command))
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
output, err = p.communicate()
rc = p.returncode
output = output.decode("ascii")
err = err.decode("ascii")
return rc, output, err
def describe_sizes(**sizes):
# seqLength, numLayers, inputSize, hiddenSize, miniBatch
return 's{}-l{}-i{}-h{}-b{}'.format(
sizes['seqLength'],
sizes['numLayers'],
sizes['inputSize'],
sizes['hiddenSize'],
sizes['miniBatch'],
)
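# Example: describe_sizes(seqLength=100, numLayers=1, inputSize=512,
# hiddenSize=512, miniBatch=64) returns 's100-l1-i512-h512-b64'.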
OUTPUT_DIR = '~/profout/'
def nvprof_output_filename(rnns, **params):
rnn_tag = '-'.join(rnns)
size_tag = describe_sizes(**params)
date_tag = datetime.datetime.now().strftime("%m%d%y-%H%M")
return '{}prof_{}_{}_{}.nvvp'.format(OUTPUT_DIR, rnn_tag,
size_tag, date_tag)
def nvprof(cmd, outpath):
return system('nvprof -o {} {}'.format(outpath, cmd))
def full_profile(rnns, **args):
profile_args = []
for k, v in args.items():
profile_args.append('--{}={}'.format(k, v))
profile_args.append('--rnns {}'.format(' '.join(rnns)))
profile_args.append('--internal_run')
outpath = nvprof_output_filename(rnns, **args)
cmd = '{} -m fastrnns.profile {}'.format(
sys.executable, ' '.join(profile_args))
rc, stdout, stderr = nvprof(cmd, outpath)
if rc != 0:
raise RuntimeError('stderr: {}\nstdout: {}'.format(stderr, stdout))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Profile RNNs')
parser.add_argument('--seqLength', default='100', type=int)
parser.add_argument('--numLayers', default='1', type=int)
parser.add_argument('--inputSize', default='512', type=int)
parser.add_argument('--hiddenSize', default='512', type=int)
parser.add_argument('--miniBatch', default='64', type=int)
parser.add_argument('--sleep_between_seconds', default='1', type=int)
parser.add_argument('--nloops', default='5', type=int)
parser.add_argument('--rnns', nargs='*',
help='What to run. cudnn, aten, jit, etc')
# if internal_run, we actually run the rnns.
# if not internal_run, we shell out to nvprof with internal_run=T
parser.add_argument('--internal_run', default=False, action='store_true',
help='Don\'t use this')
args = parser.parse_args()
if args.rnns is None:
args.rnns = ['cudnn', 'aten', 'jit']
print(args)
if args.internal_run:
profile(**vars(args))
else:
full_profile(**vars(args))
| pytorch-master | benchmarks/fastrnns/profile.py |
from .cells import * # noqa: F403
from .factory import * # noqa: F403
# (output, next_state) = cell(input, state)
seqLength = 100
numLayers = 2
inputSize = 512
hiddenSize = 512
miniBatch = 64
| pytorch-master | benchmarks/fastrnns/__init__.py |
import argparse
import torch
import torch.nn as nn
from .factory import pytorch_lstm_creator, varlen_pytorch_lstm_creator
from .runner import get_nn_runners
def barf():
import pdb
pdb.set_trace()
def assertEqual(tensor, expected, threshold=0.001):
if isinstance(tensor, list) or isinstance(tensor, tuple):
for t, e in zip(tensor, expected):
assertEqual(t, e)
else:
if (tensor - expected).abs().max() > threshold:
barf()
def filter_requires_grad(tensors):
return [t for t in tensors if t.requires_grad]
def test_rnns(experim_creator, control_creator, check_grad=True, verbose=False,
seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
miniBatch=64, device='cuda', seed=17):
creator_args = dict(seqLength=seqLength, numLayers=numLayers,
inputSize=inputSize, hiddenSize=hiddenSize,
miniBatch=miniBatch, device=device, seed=seed)
print("Setting up...")
control = control_creator(**creator_args)
experim = experim_creator(**creator_args)
# Precondition
assertEqual(experim.inputs, control.inputs)
assertEqual(experim.params, control.params)
print("Checking outputs...")
control_outputs = control.forward(*control.inputs)
experim_outputs = experim.forward(*experim.inputs)
assertEqual(experim_outputs, control_outputs)
print("Checking grads...")
assert control.backward_setup is not None
assert experim.backward_setup is not None
assert control.backward is not None
assert experim.backward is not None
control_backward_inputs = control.backward_setup(control_outputs, seed)
experim_backward_inputs = experim.backward_setup(experim_outputs, seed)
control.backward(*control_backward_inputs)
experim.backward(*experim_backward_inputs)
control_grads = [p.grad for p in control.params]
experim_grads = [p.grad for p in experim.params]
assertEqual(experim_grads, control_grads)
if verbose:
print(experim.forward.graph_for(*experim.inputs))
print('')
def test_vl_py(**test_args):
# XXX: This compares vl_py with vl_lstm.
# It's done this way because those two don't give the same outputs so
# the result isn't an apples-to-apples comparison right now.
control_creator = varlen_pytorch_lstm_creator
name, experim_creator, context = get_nn_runners('vl_py')[0]
with context():
print('testing {}...'.format(name))
creator_keys = [
'seqLength', 'numLayers', 'inputSize',
'hiddenSize', 'miniBatch', 'device', 'seed'
]
creator_args = {key: test_args[key] for key in creator_keys}
print("Setting up...")
control = control_creator(**creator_args)
experim = experim_creator(**creator_args)
# Precondition
assertEqual(experim.inputs, control.inputs[:2])
assertEqual(experim.params, control.params)
print("Checking outputs...")
control_out, control_hiddens = control.forward(*control.inputs)
control_hx, control_cx = control_hiddens
experim_out, experim_hiddens = experim.forward(*experim.inputs)
experim_hx, experim_cx = experim_hiddens
experim_padded = nn.utils.rnn.pad_sequence(experim_out).squeeze(-2)
assertEqual(experim_padded, control_out)
assertEqual(torch.cat(experim_hx, dim=1), control_hx)
assertEqual(torch.cat(experim_cx, dim=1), control_cx)
print("Checking grads...")
assert control.backward_setup is not None
assert experim.backward_setup is not None
assert control.backward is not None
assert experim.backward is not None
control_backward_inputs = control.backward_setup(
(control_out, control_hiddens), test_args['seed'])
experim_backward_inputs = experim.backward_setup(
(experim_out, experim_hiddens), test_args['seed'])
control.backward(*control_backward_inputs)
experim.backward(*experim_backward_inputs)
control_grads = [p.grad for p in control.params]
experim_grads = [p.grad for p in experim.params]
assertEqual(experim_grads, control_grads)
if test_args['verbose']:
print(experim.forward.graph_for(*experim.inputs))
print('')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test lstm correctness')
parser.add_argument('--seqLength', default='100', type=int)
parser.add_argument('--numLayers', default='1', type=int)
parser.add_argument('--inputSize', default='512', type=int)
parser.add_argument('--hiddenSize', default='512', type=int)
parser.add_argument('--miniBatch', default='64', type=int)
parser.add_argument('--device', default='cuda', type=str)
parser.add_argument('--check_grad', default='True', type=bool)
parser.add_argument('--variable_lstms', action='store_true')
parser.add_argument('--seed', default='17', type=int)
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--rnns', nargs='*',
help='What to run. jit_premul, jit, etc')
args = parser.parse_args()
if args.rnns is None:
args.rnns = ['jit_premul', 'jit']
print(args)
if 'cuda' in args.device:
assert torch.cuda.is_available()
rnn_runners = get_nn_runners(*args.rnns)
should_test_varlen_lstms = args.variable_lstms
test_args = vars(args)
del test_args['rnns']
del test_args['variable_lstms']
if should_test_varlen_lstms:
test_vl_py(**test_args)
for name, creator, context in rnn_runners:
with context():
print('testing {}...'.format(name))
test_rnns(creator, pytorch_lstm_creator, **test_args)
| pytorch-master | benchmarks/fastrnns/test.py |
import torch
@torch.jit.script
def fn(x, scale, shift):
return scale * x / shift
@torch.jit.script
def recurrent(x, scale, shift):
y = x
for i in range(100):
y = fn(y, scale, shift)
return y
x = torch.randn(2, 2, device='cuda')
scale = torch.randn(2, 2, device='cuda', requires_grad=True)
shift = torch.randn(2, 2, device='cuda', requires_grad=True)
inputs = [x, scale, shift]
out = recurrent(x, scale, shift)
recurrent.graph_for(x, scale, shift)
import torch
@torch.jit.script
def recurrent_scaleshift(x, scale, shift):
y = x
for i in range(64):
y = scale * y + shift
return y
x = torch.randn(2, 2, device='cuda')
scale = torch.randn(2, 2, device='cuda', requires_grad=True)
shift = torch.randn(2, 2, device='cuda', requires_grad=True)
inputs = [x, scale, shift]
out = recurrent_scaleshift(x, scale, shift)
recurrent_scaleshift.graph_for(x, scale, shift)
import torch
x = torch.tensor([])
x.requires_grad = True
x.mean().backward() # no error triggered
x = x.cuda()
x.mean().backward()
| pytorch-master | benchmarks/fastrnns/scratch.py |
import torch
from collections import namedtuple
from typing import List, Tuple
from torch import Tensor
from .cells import lstm_cell, premul_lstm_cell, premul_lstm_cell_no_bias, flat_lstm_cell
# list[list[T]] -> list[T]
def flatten_list(lst):
result = []
for inner in lst:
result.extend(inner)
return result
'''
Define a creator as a function:
(options) -> (inputs, params, forward, backward_setup, backward)
inputs: the inputs to the returned 'forward'. One can call
forward(*inputs) directly.
params: List[Tensor] all requires_grad=True parameters.
forward: function / graph executor / module
One can call rnn(rnn_inputs) using the outputs of the creator.
backward_setup: backward_inputs = backward_setup(*outputs)
Then, we pass backward_inputs to backward. If None, then it is assumed to
be the identity function.
backward: Given `output = backward_setup(*forward(*inputs))`, performs
backpropagation. If None, then nothing happens.
fastrnns.bench times the forward and backward invocations.
'''
ModelDef = namedtuple('ModelDef', [
'inputs', 'params', 'forward', 'backward_setup', 'backward'])
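# A minimal sketch of how a ModelDef produced by one of the creators below is
# consumed (this mirrors what fastrnns.bench does; values are the usual defaults):
#
# >>> modeldef = lstm_creator(script=True, seqLength=100, numLayers=1,
# ...                         inputSize=512, hiddenSize=512, miniBatch=64,
# ...                         device='cuda', seed=17)
# >>> out = modeldef.forward(*modeldef.inputs)
# >>> backward_inputs = modeldef.backward_setup(out)
# >>> modeldef.backward(*backward_inputs)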
def lstm_backward_setup(lstm_outputs, seed=None):
hx, _ = lstm_outputs
return simple_backward_setup(hx, seed)
def simple_backward_setup(output, seed=None):
assert isinstance(output, torch.Tensor)
if seed:
torch.manual_seed(seed)
grad_output = torch.randn_like(output)
return output, grad_output
def simple_backward(output, grad_output, **kwargs):
return output.backward(grad_output, **kwargs)
def pytorch_lstm_creator(**kwargs):
input, hidden, _, module = lstm_inputs(return_module=True, **kwargs)
return ModelDef(
inputs=[input, hidden],
params=flatten_list(module.all_weights),
forward=module,
backward_setup=lstm_backward_setup,
backward=simple_backward)
def lstm_creator(script=True, **kwargs):
input, hidden, params, _ = lstm_inputs(return_module=False, **kwargs)
inputs = [input, hidden] + params[0]
return ModelDef(
inputs=inputs,
params=flatten_list(params),
forward=lstm_factory(lstm_cell, script),
backward_setup=lstm_backward_setup,
backward=simple_backward)
def lnlstm_creator(script=True, decompose_layernorm=False, **kwargs):
assert script is True
from .custom_lstms import script_lnlstm
input_size = kwargs['inputSize']
hidden_size = kwargs['hiddenSize']
seq_len = kwargs['seqLength']
batch_size = kwargs['miniBatch']
ge = script_lnlstm(input_size, hidden_size, 1,
decompose_layernorm=decompose_layernorm).cuda()
input = torch.randn(seq_len, batch_size, input_size, device='cuda')
states = [(torch.randn(batch_size, hidden_size, device='cuda'),
torch.randn(batch_size, hidden_size, device='cuda'))]
return ModelDef(
inputs=[input, states],
params=ge.parameters(),
forward=ge,
backward_setup=lstm_backward_setup,
backward=simple_backward)
def dropoutlstm_creator(script=True, **kwargs):
assert script is True
from .custom_lstms import script_lstm, LSTMState
input_size = kwargs['inputSize']
hidden_size = kwargs['hiddenSize']
seq_len = kwargs['seqLength']
batch_size = kwargs['miniBatch']
num_layers = kwargs['numLayers']
ge = script_lstm(input_size, hidden_size, num_layers, dropout=True).cuda()
input = torch.randn(seq_len, batch_size, input_size, device='cuda')
states = [LSTMState(torch.randn(batch_size, hidden_size, device='cuda'),
torch.randn(batch_size, hidden_size, device='cuda'))
for _ in range(num_layers)]
return ModelDef(
inputs=[input, states],
params=ge.parameters(),
forward=ge,
backward_setup=lstm_backward_setup,
backward=simple_backward)
def lstm_premul_creator(script=True, **kwargs):
input, hidden, params, _ = lstm_inputs(return_module=False, **kwargs)
inputs = [input, hidden] + params[0]
return ModelDef(
inputs=inputs,
params=flatten_list(params),
forward=lstm_factory_premul(premul_lstm_cell, script),
backward_setup=lstm_backward_setup,
backward=simple_backward)
def lstm_premul_bias_creator(script=True, **kwargs):
input, hidden, params, _ = lstm_inputs(return_module=False, **kwargs)
inputs = [input, hidden] + params[0]
return ModelDef(
inputs=inputs,
params=flatten_list(params),
forward=lstm_factory_premul_bias(premul_lstm_cell_no_bias, script),
backward_setup=lstm_backward_setup,
backward=simple_backward)
def lstm_simple_creator(script=True, **kwargs):
input, hidden, params, _ = lstm_inputs(return_module=False, **kwargs)
inputs = [input] + [h[0] for h in hidden] + params[0]
return ModelDef(
inputs=inputs,
params=flatten_list(params),
forward=lstm_factory_simple(flat_lstm_cell, script),
backward_setup=lstm_backward_setup,
backward=simple_backward)
def lstm_multilayer_creator(script=True, **kwargs):
input, hidden, params, _ = lstm_inputs(return_module=False, **kwargs)
inputs = [input, hidden, flatten_list(params)]
return ModelDef(
inputs=inputs,
params=flatten_list(params),
forward=lstm_factory_multilayer(lstm_cell, script),
backward_setup=lstm_backward_setup,
backward=simple_backward)
def imagenet_cnn_creator(arch, jit=True):
def creator(device='cuda', **kwargs):
model = arch().to(device)
x = torch.randn(32, 3, 224, 224, device=device)
if jit:
model = torch.jit.trace(model, x)
return ModelDef(
inputs=(x,),
params=list(model.parameters()),
forward=model,
backward_setup=simple_backward_setup,
backward=simple_backward)
return creator
def varlen_lstm_inputs(minlen=30, maxlen=100,
numLayers=1, inputSize=512, hiddenSize=512,
miniBatch=64, return_module=False, device='cuda',
seed=None, **kwargs):
if seed is not None:
torch.manual_seed(seed)
lengths = torch.randint(
low=minlen, high=maxlen, size=[miniBatch],
dtype=torch.long, device=device)
x = [torch.randn(length, inputSize, device=device)
for length in lengths]
hx = torch.randn(numLayers, miniBatch, hiddenSize, device=device)
cx = torch.randn(numLayers, miniBatch, hiddenSize, device=device)
lstm = torch.nn.LSTM(inputSize, hiddenSize, numLayers).to(device)
if return_module:
return x, lengths, (hx, cx), lstm.all_weights, lstm
else:
# NB: lstm.all_weights format:
# wih, whh, bih, bhh = lstm.all_weights[layer]
return x, lengths, (hx, cx), lstm.all_weights, None
def varlen_lstm_backward_setup(forward_output, seed=None):
if seed:
torch.manual_seed(seed)
rnn_utils = torch.nn.utils.rnn
sequences = forward_output[0]
padded = rnn_utils.pad_sequence(sequences)
grad = torch.randn_like(padded)
return padded, grad
def varlen_pytorch_lstm_creator(**kwargs):
rnn_utils = torch.nn.utils.rnn
sequences, _, hidden, _, module = varlen_lstm_inputs(
return_module=True, **kwargs)
def forward(sequences, hidden):
packed = rnn_utils.pack_sequence(sequences, enforce_sorted=False)
out, new_hidden = module(packed, hidden)
padded, lengths = rnn_utils.pad_packed_sequence(out)
# XXX: It's more efficient to store the output in its padded form,
# but that might not be conducive to loss computation.
# Un-padding the output also makes the backward pass 2x slower...
# return [padded[:lengths[i], i, :] for i in range(lengths.size(0))]
return padded, new_hidden
return ModelDef(
inputs=[sequences, hidden],
params=flatten_list(module.all_weights),
forward=forward,
backward_setup=lstm_backward_setup,
backward=simple_backward)
def varlen_lstm_factory(cell, script):
def dynamic_rnn(sequences: List[Tensor], hiddens: Tuple[Tensor, Tensor], wih: Tensor,
whh: Tensor, bih: Tensor, bhh: Tensor
) -> Tuple[List[Tensor], Tuple[List[Tensor], List[Tensor]]]:
hx, cx = hiddens
hxs = hx.unbind(1)
cxs = cx.unbind(1)
# List of: (output, hx, cx)
outputs = []
hx_outs = []
cx_outs = []
for batch in range(len(sequences)):
output = []
hy, cy = hxs[batch], cxs[batch]
inputs = sequences[batch].unbind(0)
for seq_idx in range(len(inputs)):
hy, cy = cell(
inputs[seq_idx].unsqueeze(0), (hy, cy), wih, whh, bih, bhh)
output += [hy]
outputs += [torch.stack(output)]
hx_outs += [hy.unsqueeze(0)]
cx_outs += [cy.unsqueeze(0)]
return outputs, (hx_outs, cx_outs)
if script:
cell = torch.jit.script(cell)
dynamic_rnn = torch.jit.script(dynamic_rnn)
return dynamic_rnn
def varlen_lstm_creator(script=False, **kwargs):
sequences, _, hidden, params, _ = varlen_lstm_inputs(
return_module=False, **kwargs)
inputs = [sequences, hidden] + params[0]
return ModelDef(
inputs=inputs,
params=flatten_list(params),
forward=varlen_lstm_factory(lstm_cell, script),
backward_setup=varlen_lstm_backward_setup,
backward=simple_backward)
# cudnn_layernorm_lstm: since cudnn does not have a LayerNorm LSTM, we cannot benchmark
# the lower bound directly. Instead, we only benchmark the forward pass by mimicking the
# computation of a cudnn LSTM plus seq_len * 3 LayerNorm computations. This should serve
# as a perf lower bound for the LayerNorm LSTM forward pass (given that LayerNorm itself
# is invariant). The lower bound of the backward pass is hard to get since we lose the
# intermediate results, but we can still optimize the LayerNorm implementation to
# tighten the forward lower bound.
def layernorm_pytorch_lstm_creator(**kwargs):
input, hidden, _, module = lstm_inputs(return_module=True, **kwargs)
batch_size = kwargs['miniBatch']
hidden_size = kwargs['hiddenSize']
ln_i = torch.nn.LayerNorm(4 * hidden_size).cuda()
ln_h = torch.nn.LayerNorm(4 * hidden_size).cuda()
ln_c = torch.nn.LayerNorm(hidden_size).cuda()
ln_input1 = torch.randn(batch_size, 4 * hidden_size, device='cuda')
def forward(input, hidden):
out, new_hidden = module(input, hidden)
        # plus (seq_len * three LayerNorm cell computations) to mimic the lower bound of
        # a LayerNorm cudnn LSTM in the forward pass
seq_len = len(input.unbind(0))
hy, cy = new_hidden
for i in range(seq_len):
ln_i_output = ln_i(ln_input1)
ln_h_output = ln_h(ln_input1)
cy = ln_c(cy)
return out, (hy, cy)
return ModelDef(
inputs=[input, hidden],
params=flatten_list(module.all_weights),
forward=forward,
backward_setup=lstm_backward_setup,
backward=None)
# input: lstm.all_weights format (wih, whh, bih, bhh = lstm.all_weights[layer])
# output: packed_weights with format
# packed_weights[0] is wih with size (layer, 4*hiddenSize, inputSize)
# packed_weights[1] is whh with size (layer, 4*hiddenSize, hiddenSize)
# packed_weights[2] is bih with size (layer, 4*hiddenSize)
# packed_weights[3] is bhh with size (layer, 4*hiddenSize)
def stack_weights(weights):
def unzip_columns(mat):
assert isinstance(mat, list)
assert isinstance(mat[0], list)
layers = len(mat)
columns = len(mat[0])
return [[mat[layer][col] for layer in range(layers)]
for col in range(columns)]
# XXX: script fns have problems indexing multidim lists, so we try to
# avoid them by stacking tensors
all_weights = weights
packed_weights = [torch.stack(param)
for param in unzip_columns(all_weights)]
return packed_weights
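# Rough usage sketch (relies on lstm_inputs below; shapes assume the default
# inputSize == hiddenSize == 512 so that per-layer weights stack cleanly):
#
# >>> _, _, all_weights, _ = lstm_inputs(numLayers=2, return_module=True)
# >>> wih, whh, bih, bhh = stack_weights(all_weights)
# >>> wih.shape   # torch.Size([2, 2048, 512])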
# returns: x, (hx, cx), all_weights, lstm module with all_weights as params
def lstm_inputs(seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
miniBatch=64, dropout=0.0, return_module=False, device='cuda', seed=None):
if seed is not None:
torch.manual_seed(seed)
x = torch.randn(seqLength, miniBatch, inputSize, device=device)
hx = torch.randn(numLayers, miniBatch, hiddenSize, device=device)
cx = torch.randn(numLayers, miniBatch, hiddenSize, device=device)
lstm = torch.nn.LSTM(inputSize, hiddenSize, numLayers, dropout=dropout)
if 'cuda' in device:
lstm = lstm.cuda()
if return_module:
return x, (hx, cx), lstm.all_weights, lstm
else:
# NB: lstm.all_weights format:
# wih, whh, bih, bhh = lstm.all_weights[layer]
return x, (hx, cx), lstm.all_weights, None
def lstm_factory(cell, script):
def dynamic_rnn(input: Tensor, hidden: Tuple[Tensor, Tensor], wih: Tensor, whh: Tensor,
bih: Tensor, bhh: Tensor) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
hx, cx = hidden
outputs = []
inputs = input.unbind(0)
hy, cy = hx[0], cx[0]
for seq_idx in range(len(inputs)):
hy, cy = cell(inputs[seq_idx], (hy, cy), wih, whh, bih, bhh)
outputs += [hy]
return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
if script:
cell = torch.jit.script(cell)
dynamic_rnn = torch.jit.script(dynamic_rnn)
return dynamic_rnn
# premul: we're going to premultiply the inputs & weights
def lstm_factory_premul(premul_cell, script):
def dynamic_rnn(input: Tensor, hidden: Tuple[Tensor, Tensor], wih: Tensor, whh: Tensor,
bih: Tensor, bhh: Tensor) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
hx, cx = hidden
outputs = []
inputs = torch.matmul(input, wih.t()).unbind(0)
hy, cy = hx[0], cx[0]
for seq_idx in range(len(inputs)):
hy, cy = premul_cell(inputs[seq_idx], (hy, cy), whh, bih, bhh)
outputs += [hy]
return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
if script:
premul_cell = torch.jit.script(premul_cell)
dynamic_rnn = torch.jit.script(dynamic_rnn)
return dynamic_rnn
# premul: we're going to premultiply the inputs & weights, and add bias
def lstm_factory_premul_bias(premul_cell, script):
def dynamic_rnn(input: Tensor, hidden: Tuple[Tensor, Tensor], wih: Tensor, whh: Tensor,
bih: Tensor, bhh: Tensor) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
hx, cx = hidden
outputs = []
inpSize = input.size()
# add bias for all timesteps instead of going step-by-step, results in a single reduction kernel in the backward
# FIXME matmul(x,y) + bias currently goes through jit AD, and backward formula in AD is not optimized for this
# case. Workaround with mm and views.
inputs = torch.mm(input.view(-1, inpSize[2]), wih.t()) + bih
inputs = inputs.view(inpSize[0], inpSize[1], -1).unbind(0)
hy, cy = hx[0], cx[0]
for seq_idx in range(len(inputs)):
hy, cy = premul_cell(inputs[seq_idx], (hy, cy), whh, bhh)
outputs += [hy]
return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
if script:
premul_cell = torch.jit.script(premul_cell)
dynamic_rnn = torch.jit.script(dynamic_rnn)
return dynamic_rnn
# simple: flat inputs (no tuples), no list to accumulate outputs
# useful mostly for benchmarking older JIT versions
def lstm_factory_simple(cell, script):
def dynamic_rnn(input, hx, cx, wih, whh, bih, bhh):
hy = hx # for scoping
cy = cx # for scoping
inputs = input.unbind(0)
for seq_idx in range(len(inputs)):
hy, cy = cell(inputs[seq_idx], hy, cy, wih, whh, bih, bhh)
return hy, cy
if script:
cell = torch.jit.script(cell)
dynamic_rnn = torch.jit.script(dynamic_rnn)
return dynamic_rnn
def lstm_factory_multilayer(cell, script):
def dynamic_rnn(input: Tensor, hidden: Tuple[Tensor, Tensor], params: List[Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
params_stride = 4 # NB: this assumes that biases are there
hx, cx = hidden
hy, cy = hidden # for scoping...
inputs, outputs = input.unbind(0), []
for layer in range(hx.size(0)):
hy = hx[layer]
cy = cx[layer]
base_idx = layer * params_stride
wih = params[base_idx]
whh = params[base_idx + 1]
bih = params[base_idx + 2]
bhh = params[base_idx + 3]
for seq_idx in range(len(inputs)):
hy, cy = cell(inputs[seq_idx], (hy, cy), wih, whh, bih, bhh)
outputs += [hy]
inputs, outputs = outputs, []
return torch.stack(inputs), (hy.unsqueeze(0), cy.unsqueeze(0))
if script:
cell = torch.jit.script(cell)
dynamic_rnn = torch.jit.script(dynamic_rnn)
return dynamic_rnn
| pytorch-master | benchmarks/fastrnns/factory.py |
import argparse
from collections import namedtuple
import torch
import gc
import sys
import json
import copy
import time
from torch.autograd.profiler import record_function
from .fuser import set_fuser
from .runner import get_nn_runners
BenchResult = namedtuple('BenchResult', [
'name', 'avg_fwd', 'std_fwd', 'info_fwd', 'avg_bwd', 'std_bwd', 'info_bwd',
])
def fit_str(string, colwidth=16):
if len(string) < colwidth:
return (colwidth - len(string)) * ' ' + string
else:
return string[:colwidth]
def to_str(item):
if isinstance(item, float):
return '%.4g' % item
return str(item)
def print_header(colwidth=16, sep=' '):
items = []
for item in BenchResult._fields:
items.append(fit_str(item))
return sep.join(items)
def pretty_print(benchresult, colwidth=16, sep=' '):
items = []
for thing in benchresult:
items.append(fit_str(to_str(thing)))
return sep.join(items)
# Shim for torch.cuda.Event when running on CPU. Note that, unlike the CUDA event's
# elapsed_time (which is reported in milliseconds), this shim reports seconds.
class Event(object):
def __init__(self, enable_timing):
pass
def record(self):
self.time = time.perf_counter()
def elapsed_time(self, end_event):
assert isinstance(end_event, Event)
return end_event.time - self.time
def trainbench(name, rnn_creator, nloops=100, warmup=10,
seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
miniBatch=64, device='cuda', seed=None):
def train_batch(modeldef):
# CUDA events for timing
if device == 'cuda':
timer_class = torch.cuda.Event
else:
timer_class = Event
fwd_start_event = timer_class(enable_timing=True)
fwd_end_event = timer_class(enable_timing=True)
bwd_start_event = timer_class(enable_timing=True)
bwd_end_event = timer_class(enable_timing=True)
gc.collect()
fwd_start_event.record()
with record_function("## forward ##"):
forward_output = modeldef.forward(*modeldef.inputs)
fwd_end_event.record()
        # XXX: Uncomment the next line if you need to print the executed graph
# print(modeldef.forward.graph_for(*modeldef.inputs))
if modeldef.backward_setup is not None:
backward_input = modeldef.backward_setup(forward_output)
else:
backward_input = forward_output
gc.collect()
bwd_start_event.record()
if modeldef.backward is not None:
modeldef.backward(*backward_input)
bwd_end_event.record()
if modeldef.backward is not None:
with torch.no_grad():
for param in modeldef.params:
assert param.grad is not None
param.grad.zero_()
if device == 'cuda':
torch.cuda.synchronize()
fwd_time = fwd_start_event.elapsed_time(fwd_end_event)
bwd_time = bwd_start_event.elapsed_time(bwd_end_event)
return fwd_time, bwd_time
    creator_args = {
'seqLength': seqLength, 'numLayers': numLayers,
'inputSize': inputSize, 'hiddenSize': hiddenSize,
'miniBatch': miniBatch, 'device': device, 'seed': seed
}
modeldef = rnn_creator(**creator_args)
[train_batch(modeldef) for _ in range(warmup)]
results = [train_batch(modeldef) for _ in range(nloops)]
fwd_times, bwd_times = zip(*results)
fwd_times = torch.tensor(fwd_times)
bwd_times = torch.tensor(bwd_times)
return BenchResult(name=name,
avg_fwd=fwd_times.mean().item(),
std_fwd=fwd_times.std().item(),
info_fwd=fwd_times,
avg_bwd=bwd_times.mean().item(),
std_bwd=bwd_times.std().item(),
info_bwd=bwd_times)
def print_stderr(*args, **kwargs):
kwargs['file'] = sys.stderr
return print(*args, **kwargs)
def print_json_oss_format(results):
oss_results = {}
for group_name, group_val in results.items():
oss_results[group_name] = {}
for model_name, run_time in group_val.items():
# Output for OSS
oss_results[group_name][model_name] = run_time['avg']
print(json.dumps(oss_results))
def print_json_pep_format(results):
# print the AI-PEP format json string for each model
for group_name, group_val in results.items():
for model_name, run_time in group_val.items():
# Output for AI-PEP
num_iters = len(run_time['info'])
info = run_time['info'].tolist()
for i in range(num_iters):
print("Caffe2Observer " + json.dumps(
{
"type": "NET",
"metric": group_name + "-" + model_name,
"unit": "ms",
"value": str(info[i])
}
))
def bench(rnn_runners, group_name, print_json=False, sep=' ', **params):
print_stderr(print_header(sep=sep))
results = {}
for name, creator, context in rnn_runners:
with context():
try:
result = trainbench(name, creator, **params)
                # Replace info_fwd and info_bwd with 'None' so the printed row stays short
result_with_no_info = result._replace(
info_fwd='None', info_bwd='None')
print_stderr(pretty_print(result_with_no_info, sep=sep))
results[name] = result
except Exception as e:
if not print_json:
raise
return {
group_name: {k: {"avg": v.avg_fwd, "std": v.std_fwd, "info": v.info_fwd} for k, v in results.items()},
group_name + '-backward': {k: {"avg": v.avg_bwd, "std": v.std_bwd, "info": v.info_bwd} for k, v in results.items()},
}
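# The dict returned by bench() roughly looks like (values are illustrative):
# {
#     'lstm': {'jit': {'avg': 12.3, 'std': 0.4, 'info': <tensor of per-iteration times>}, ...},
#     'lstm-backward': {'jit': {...}, ...},
# }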
def bench_group(model_list, bench_name, bench_group, bench_args):
print_stderr('Benchmarking {}s...'.format(bench_name))
nn_results = bench(get_nn_runners(*model_list), bench_group, **bench_args)
print_stderr('')
return nn_results
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Profile RNNs')
    # The --group flag controls which benchmark groups get run.
    # To run only one or two benchmarks, combine it with the model flags,
    # e.g.: python -m fastrnns.bench --rnns jit --group rnns
default_groups = ['cnns', 'rnns']
parser.add_argument('--seqLength', default='100', type=int)
parser.add_argument('--numLayers', default='1', type=int)
parser.add_argument('--inputSize', default='512', type=int)
parser.add_argument('--hiddenSize', default='512', type=int)
parser.add_argument('--miniBatch', default='64', type=int)
parser.add_argument('--warmup', default='10', type=int)
parser.add_argument('--nloops', default='100', type=int)
parser.add_argument('--device', default='cuda', type=str)
parser.add_argument('--variable_lstms', action='store_true',
help='Also benchmark variable sequence length lstms '
'Note that some of these run really slowly '
'and that the `seqLength` flag will be ignored.')
parser.add_argument('--sep', default=' ', type=str)
parser.add_argument('--print-json', nargs='?', default=None, const='oss')
parser.add_argument('--rnns', nargs='*',
help='What to run. cudnn, aten, jit, etc')
parser.add_argument('--cnns', nargs='*',
help='What to run. resnet18, resnet18_jit, resnet50, etc')
parser.add_argument('--group', nargs='*', default=default_groups, help='Which group to run. cnns, rnns, etc.')
parser.add_argument('--fuser', default='te', type=str,
help='The fuser backend to use. One of: te, old, or none')
parser.add_argument('--executor', default=None, type=str,
help='The executor to use. One of: legacy, simple, profiling')
parser.add_argument('--cuda_pointwise_loop_level', default=None, type=int)
parser.add_argument('--cuda_pointwise_block_count', default=None, type=int)
parser.add_argument('--cuda_pointwise_block_size', default=None, type=int)
args = parser.parse_args()
set_fuser(args.fuser, args.executor)
if args.cuda_pointwise_loop_level:
torch._C._jit_set_te_cuda_pointwise_loop_levels(args.cuda_pointwise_loop_level)
if args.cuda_pointwise_block_count:
torch._C._jit_set_te_cuda_pointwise_block_count(args.cuda_pointwise_block_count)
if args.cuda_pointwise_block_size:
torch._C._jit_set_te_cuda_pointwise_block_size(args.cuda_pointwise_block_size)
rnns = args.rnns or ['cudnn', 'aten', 'jit', 'jit_premul', 'jit_premul_bias', 'jit_simple',
'jit_multilayer', 'py']
cnns = args.cnns or ['resnet18', 'resnet18_jit', 'resnet50', 'resnet50_jit']
# TODO: Maybe add a separate section for the layernorm/dropout lstms
# 'cudnn_layernorm', jit_layernorm', 'jit_layernom_decom',
# 'jit', 'jit_dropout', 'cudnn_dropout'
vlrnns = ['vl_cudnn', 'vl_jit', 'vl_py']
if args.print_json:
print_stderr = lambda *args, **kwargs: None # noqa: E731,F811
print_stderr(args)
bench_args = copy.deepcopy(vars(args))
should_bench_varlen_lstms = args.variable_lstms
del bench_args['group']
del bench_args['rnns']
del bench_args['cnns']
del bench_args['variable_lstms']
del bench_args['fuser']
del bench_args['executor']
del bench_args['cuda_pointwise_loop_level']
del bench_args['cuda_pointwise_block_count']
del bench_args['cuda_pointwise_block_size']
results = {}
if should_bench_varlen_lstms:
if args.nloops + args.warmup > 30:
print_stderr(
'WARNING: some of the variable sequence length lstms are '
'very unoptimized and therefore take forever to run.')
results.update(bench_group(vlrnns, 'variable-length sequence LSTM', 'vl_lstm', bench_args))
if 'rnns' in args.group:
results.update(bench_group(rnns, 'LSTM', 'lstm', bench_args))
if 'cnns' in args.group:
results.update(bench_group(cnns, 'ResNet', 'resnet', bench_args))
if args.print_json == 'oss':
print_json_oss_format(results)
elif args.print_json == 'pep':
print_json_pep_format(results)
| pytorch-master | benchmarks/fastrnns/bench.py |
import torch
from typing import Tuple
from torch import Tensor
def milstm_cell(x, hx, cx, w_ih, w_hh, alpha, beta_i, beta_h, bias):
Wx = x.mm(w_ih.t())
Uz = hx.mm(w_hh.t())
# Section 2.1 in https://arxiv.org/pdf/1606.06630.pdf
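    # i.e. the multiplicative-integration pre-activation
    #     alpha * Wx * Uz + beta_i * Wx + beta_h * Uz + bias   (all elementwise)
    # replaces the additive Wx + Uz + bias of a plain LSTM; the gate
    # nonlinearities below are unchanged.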
gates = (alpha * Wx * Uz + beta_i * Wx + beta_h * Uz + bias)
# Same as LSTMCell after this point
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = ingate.sigmoid()
forgetgate = forgetgate.sigmoid()
cellgate = cellgate.tanh()
outgate = outgate.sigmoid()
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * cy.tanh()
return hy, cy
def lstm_cell(input: Tensor, hidden: Tuple[Tensor, Tensor], w_ih: Tensor,
w_hh: Tensor, b_ih: Tensor, b_hh: Tensor) -> Tuple[Tensor, Tensor]:
hx, cx = hidden
gates = torch.mm(input, w_ih.t()) + torch.mm(hx, w_hh.t()) + b_ih + b_hh
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
def flat_lstm_cell(input: Tensor, hx: Tensor, cx: Tensor, w_ih: Tensor,
w_hh: Tensor, b_ih: Tensor, b_hh: Tensor) -> Tuple[Tensor, Tensor]:
gates = torch.mm(input, w_ih.t()) + torch.mm(hx, w_hh.t()) + b_ih + b_hh
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
def premul_lstm_cell(igates: Tensor, hidden: Tuple[Tensor, Tensor], w_hh: Tensor,
b_ih: Tensor, b_hh: Tensor) -> Tuple[Tensor, Tensor]:
hx, cx = hidden
gates = igates + torch.mm(hx, w_hh.t()) + b_ih + b_hh
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
def premul_lstm_cell_no_bias(igates: Tensor, hidden: Tuple[Tensor, Tensor], w_hh: Tensor, b_hh: Tensor) -> Tuple[Tensor, Tensor]:
hx, cx = hidden
gates = igates + torch.mm(hx, w_hh.t()) + b_hh
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
def gru_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
gi = torch.mm(input, w_ih.t()) + b_ih
gh = torch.mm(hidden, w_hh.t()) + b_hh
i_r, i_i, i_n = gi.chunk(3, 1)
h_r, h_i, h_n = gh.chunk(3, 1)
resetgate = torch.sigmoid(i_r + h_r)
inputgate = torch.sigmoid(i_i + h_i)
newgate = torch.tanh(i_n + resetgate * h_n)
hy = newgate + inputgate * (hidden - newgate)
return hy
def rnn_relu_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
igates = torch.mm(input, w_ih.t()) + b_ih
hgates = torch.mm(hidden, w_hh.t()) + b_hh
return torch.relu(igates + hgates)
def rnn_tanh_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
igates = torch.mm(input, w_ih.t()) + b_ih
hgates = torch.mm(hidden, w_hh.t()) + b_hh
return torch.tanh(igates + hgates)
| pytorch-master | benchmarks/fastrnns/cells.py |
import torch
from torch.utils.data import Dataset
def collate_sentences_lm(samples):
if len(samples) == 0:
return {}
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = torch.stack([s["source"] for s in samples], 0)
tgt_tokens = torch.stack([s["target"] for s in samples], 0)
ntokens = len(samples) * len(samples[0]["target"])
src_lengths = torch.LongTensor([len(samples[0]["source"])] * len(samples))
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"input": src_tokens,
"target": tgt_tokens,
}
return batch
class BenchmarkLMDataset(Dataset):
"""
    Dataset to benchmark a translation-like seq2seq task.
Args:
vocab_size (int, optional): size of the vocabulary (default 10000).
max_source_positions (int, optional): max number of tokens in the
source sentence (default: 1024).
total_samples (int, optional): the total number of rows in the
dataset (default: 10000).
"""
def __init__(
self, vocab_size=10000, max_source_positions=1024, total_samples=10000,
):
self.vocab_size = vocab_size
self.max_source_positions = max_source_positions
self.total_samples = total_samples
self.sizes = [self.max_source_positions] * self.total_samples
def __getitem__(self, index):
length = self.sizes[index]
source = torch.randint(1, self.vocab_size, (length,))
target = source.clone()
return {
"id": index,
"source": source,
"target": target,
}
def __len__(self):
return self.total_samples
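# A minimal usage sketch (this mirrors how the pipeline benchmark consumes the
# dataset; batch_size=8 is just an illustrative value):
#
# >>> from torch.utils.data import DataLoader
# >>> dataset = BenchmarkLMDataset()
# >>> loader = DataLoader(dataset, batch_size=8, collate_fn=collate_sentences_lm)
# >>> batch = next(iter(loader))
# >>> batch["input"].shape    # torch.Size([8, 1024])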
| pytorch-master | benchmarks/distributed/pipeline/benchmark_dataset.py |
import argparse
import math
import os
import time
from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
from torch.distributed import rpc
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.utils import partition_model
from torch.optim import Adam
def sizeof_fmt(num, suffix='B'):
    # Human-readable, 1024-based size string, e.g. sizeof_fmt(3072) == '3.00KiB'
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti']:
        if abs(num) < 1024.0:
            return "%3.2f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%3.2f%s%s" % (num, 'Pi', suffix)
def init_random_seed(seed: int):
import numpy
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
numpy.random.seed(seed)
iteration_count = 0
class EmbeddingLayer(nn.Embedding):
def __init__(self, ntoken, ninp, initrange):
super().__init__(ntoken, ninp)
self.ninp = ninp
nn.init.uniform_(self.weight, -initrange, initrange)
def forward(self, src):
return super().forward(src) * math.sqrt(self.ninp)
class PositionalEncodingLayer(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncodingLayer, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
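        # These two lines implement the sinusoidal encoding from "Attention Is All
        # You Need": PE(pos, 2i) = sin(pos / 10000^(2i / d_model)) and
        # PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model)).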
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
class TransformerDecoderLayer(nn.TransformerEncoderLayer):
"""Though this class inherits from torch.nn.TransformerEncoderLayer,
it functions as a decoder in this model"""
    def __init__(self, ninp, nhead, nhid, dropout):
        super().__init__(ninp, nhead, nhid, dropout)
self.src_mask = None
def forward(self, src):
global iteration_count
iteration_count += 1
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = nn.Transformer.generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
return super().forward(src, self.src_mask)
class LinearLayer(nn.Linear):
def __init__(self, ninp, ntoken, initrange):
super().__init__(ninp, ntoken)
nn.init.zeros_(self.bias)
nn.init.uniform_(self.weight, -initrange, initrange)
class TransformerLMSequential(nn.Sequential):
"""A small language model based on the design of GPT-2 using nn.Sequential
for compatibility with Pipe"""
def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):
layers = [
EmbeddingLayer(ntokens, ninp, initrange),
PositionalEncodingLayer(ninp, dropout),
]
for _ in range(ndecoder):
layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout))
layers.append(LinearLayer(ninp, ntokens, initrange))
super(TransformerLMSequential, self).__init__(*layers)
def make_model(args, device, ntokens):
ninp = 2048 # embedding dimension
nhid = 2048 # the dimension of the feedforward network model in nn.TransformerEncoder
nhead = 32 # the number of heads in the multiheadattention models
dropout = 0
initrange = 0.1
ndecoder = args.num_decoder_layers
model = TransformerLMSequential(ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)
criterion = nn.CrossEntropyLoss()
lr = 0.01 # learning rate
def make_adam(model):
return Adam(model.parameters(), lr=lr)
optimizer = make_adam
return model, criterion, optimizer
def train(lm_dataloader, model, criterion, optimizer, vocab_size, args):
model.train()
total_loss = 0.0
start_time = time.time()
word_counter = 0
optimizer = optimizer(model)
def get_first_device(model):
if model.devices:
return model.devices[0]
else:
return torch.cuda.current_device()
def get_last_device(model):
if model.devices:
return model.devices[-1]
else:
return torch.cuda.current_device()
print('Number of parameters for model: {}'.format(sum(p.numel() for p in model.parameters())))
for i, batch in enumerate(lm_dataloader):
bi = batch["input"]
if args.max_batch and i > args.max_batch:
break
optimizer.zero_grad()
try:
tmp = batch["input"].to(get_first_device(model))
output = model(tmp).local_value()
except Exception as e:
raise RuntimeError(f"training failed on {torch.distributed.get_rank()}") from e
target = batch["target"].to(get_last_device(model))
output = output.to(target.device)
loss = criterion(output.view(-1, vocab_size), target.view(-1))
loss.backward()
del target
del output
torch.nn.utils.clip_grad_value_(model.parameters(), 0.05)
optimizer.step()
total_loss += loss.item()
log_interval = 1
word_counter += batch["ntokens"]
if i % log_interval == 0 and i > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
print(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
i, word_counter / elapsed, cur_loss, math.exp(cur_loss)
)
)
word_counter = 0
total_loss = 0
start_time = time.time()
print('Peak memory usage for GPUs: ', end='')
for i in range(len(model.devices)):
print("cuda:{}: {}, ".format(
i,
sizeof_fmt(torch.cuda.memory_stats(i)["allocated_bytes.all.peak"])), end='')
print()
def generate_balance(num_devices, num_layers):
balance = []
layers_assigned = 0
for i in range(num_devices):
x = (num_layers - layers_assigned) / (num_devices - i)
if x.is_integer():
balance.append(int(x))
layers_assigned += x
else:
balance.append(math.ceil(x))
layers_assigned += math.ceil(x)
return balance
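# Worked example: generate_balance(num_devices=4, num_layers=10) -> [3, 3, 2, 2],
# i.e. layers are split as evenly as possible, front-loading any remainder.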
def make_model_and_data(args, device):
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
vocab_size = 10000
model, criterion, optimizer = make_model(args, device, vocab_size)
lm_dataset = BenchmarkLMDataset()
lm_dataloader = DataLoader(
lm_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_sentences_lm
)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": lm_dataloader,
"vocab_size": vocab_size,
}
def bench_single_process(args):
os.environ.update({"MASTER_ADDR" : args.host})
os.environ.update({"MASTER_PORT" : "10638"})
rpc.init_rpc(
"worker",
rank=0,
world_size=1,
)
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
num_devices = min(args.num_devices, num_devices)
assert num_devices > 0
init_random_seed(0)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
blob = make_model_and_data(args, None)
model = blob["model"]
balance = generate_balance(num_devices, len(model))
model = partition_model(model, balance)
p = Pipe(
model, chunks=args.chunks, checkpoint=args.checkpoint
)
del model
del blob["model"]
train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--host", "-o", type=str, default="localhost", help="hostname")
parser.add_argument("--chunks", type=int, default=4, help="number of microbatches per batch")
parser.add_argument("--batch-size", type=int, default=8, help="size of a batch")
parser.add_argument("--max-batch", type=int, default=10, help="Max number of batches")
parser.add_argument("--num-decoder-layers", type=int, default=10, help="Number of decoder layers in the model")
parser.add_argument(
"--checkpoint", default="except_last", choices=["always", "except_last", "never"],
help="Checkpointing strategy for pipe"
)
parser.add_argument(
"--num-devices", type=int, default=4, help="Number of GPU devices to use"
)
if __name__ == "__main__":
args = parser.parse_args()
print(f"Running benchmark with args: {args}")
bench_single_process(args)
| pytorch-master | benchmarks/distributed/pipeline/pipe.py |
#!/usr/bin/env python3
#
# Measure distributed training iteration time.
#
# This program performs a sweep over a) a number of model architectures, and
# b) an increasing number of processes. This produces a 1-GPU baseline,
# an 8-GPU baseline (if applicable), as well as measurements for however
# many processes can participate in training.
#
import argparse
import itertools
import json
import os
import shlex
import subprocess
import sys
import time
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torchvision
def allgather_object(obj):
out = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(out, obj)
return out
def allgather_run(cmd):
proc = subprocess.run(shlex.split(cmd), capture_output=True)
assert(proc.returncode == 0)
return allgather_object(proc.stdout.decode("utf-8"))
def allequal(iterator):
iterator = iter(iterator)
try:
first = next(iterator)
except StopIteration:
return True
return all(first == rest for rest in iterator)
def benchmark_process_group(pg, benchmark, use_ddp_for_single_rank=True):
torch.manual_seed(pg.rank())
torch.cuda.manual_seed(pg.rank())
model = benchmark.create_model()
data = [(benchmark.generate_inputs(), benchmark.generate_target())]
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(
model.parameters(),
0.001,
momentum=0.9,
weight_decay=1e-4)
if use_ddp_for_single_rank or pg.size() > 1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
process_group=pg,
bucket_cap_mb=benchmark.bucket_size)
measurements = []
warmup_iterations = 5
measured_iterations = 10
for (inputs, target) in (data * (warmup_iterations + measured_iterations)):
start = time.time()
output = model(*inputs)
loss = criterion(output, target)
loss.backward()
optimizer.step()
torch.cuda.synchronize()
measurements.append(time.time() - start)
# Throw away measurements for warmup iterations
return measurements[warmup_iterations:]
def run_benchmark(benchmark, ranks, opts):
group = dist.new_group(ranks=ranks, backend=benchmark.distributed_backend)
measurements = []
if dist.get_rank() in set(ranks):
if not opts:
opts = dict()
measurements = benchmark_process_group(group, benchmark, **opts)
dist.destroy_process_group(group)
dist.barrier()
# Aggregate measurements for better estimation of percentiles
return list(itertools.chain(*allgather_object(measurements)))
def sweep(benchmark):
# Synthesize the set of benchmarks to run.
# This list contain tuples for ("string prefix", [rank...]).
benchmarks = []
def append_benchmark(prefix, ranks, opts=None):
prefix = "%4d GPUs -- %s" % (len(ranks), prefix)
benchmarks.append((prefix, ranks, opts))
def local_print(msg):
if dist.get_rank() == 0:
print(msg, end='', flush=True) # noqa: E999
def print_header():
local_print("\n")
local_print("%22s" % "")
for p in [50, 75, 90, 95]:
local_print("%14s%10s" % ("sec/iter", "ex/sec"))
local_print("\n")
def print_measurements(prefix, nelem, measurements):
measurements = sorted(measurements)
local_print("%8s:" % prefix)
for p in [50, 75, 90, 95]:
v = np.percentile(measurements, p)
local_print(" p%02d: %1.3fs %6d/s" % (p, v, nelem / v))
local_print("\n")
    # Every process runs once by itself to warm up (CUDA init, etc).
append_benchmark(" warmup", [dist.get_rank()], {"use_ddp_for_single_rank": False})
# Single machine baselines
append_benchmark(" no ddp", range(1), {"use_ddp_for_single_rank": False})
append_benchmark(" 1M/1G", range(1))
append_benchmark(" 1M/2G", range(2))
append_benchmark(" 1M/4G", range(4))
# Multi-machine benchmarks
for i in range(1, (dist.get_world_size() // 8) + 1):
append_benchmark(" %dM/8G" % i, range(i * 8))
# Run benchmarks in order of increasing number of GPUs
print_header()
results = []
for prefix, ranks, opts in sorted(benchmarks, key=lambda tup: len(tup[1])):
# Turn range into materialized list.
ranks = list(ranks)
measurements = run_benchmark(benchmark, ranks, opts)
if "warmup" not in prefix:
print_measurements(prefix, benchmark.batch_size, measurements)
results.append({"ranks": ranks, "measurements": measurements})
return results
class Benchmark(object):
def __init__(self, device, distributed_backend, bucket_size):
self.device = device
self.batch_size = 32
self.distributed_backend = distributed_backend
self.bucket_size = bucket_size
def __str__(self):
raise NotImplementedError
def create_model(self):
raise NotImplementedError
def generate_inputs(self):
raise NotImplementedError
def generate_target(self):
raise NotImplementedError
class TorchvisionBenchmark(Benchmark):
def __init__(self, device, distributed_backend, bucket_size, model):
super(TorchvisionBenchmark, self).__init__(
device,
distributed_backend,
bucket_size,
)
self.model = model
def __str__(self):
return "{} with batch size {}".format(self.model, self.batch_size)
def create_model(self):
return torchvision.models.__dict__[self.model]().to(self.device)
def generate_inputs(self):
return [torch.rand([self.batch_size, 3, 224, 224], device=self.device)]
def generate_target(self):
return torch.tensor([1] * self.batch_size, dtype=torch.long, device=self.device)
def main():
parser = argparse.ArgumentParser(description='PyTorch distributed benchmark suite')
parser.add_argument("--rank", type=int, default=os.environ["RANK"])
parser.add_argument("--world-size", type=int, required=True)
parser.add_argument("--distributed-backend", type=str, default="nccl")
parser.add_argument("--bucket-size", type=int, default=25)
parser.add_argument("--master-addr", type=str, required=True)
parser.add_argument("--master-port", type=str, required=True)
parser.add_argument("--model", type=str)
parser.add_argument("--json", type=str, metavar="PATH", help="Write file with benchmark results")
args = parser.parse_args()
num_gpus_per_node = torch.cuda.device_count()
assert num_gpus_per_node == 8, "Expected 8 GPUs per machine"
# The global process group used only for communicating benchmark
# metadata, like measurements. Not for benchmarking itself.
dist.init_process_group(
backend="gloo",
init_method="tcp://{}:{}".format(args.master_addr, args.master_port),
rank=args.rank,
world_size=args.world_size,
)
output = allgather_run("nvidia-smi topo -m")
if not allequal(output):
print('Output of "nvidia-smi topo -m" differs between machines')
sys.exit(1)
if args.rank == 0:
print("-----------------------------------")
print("PyTorch distributed benchmark suite")
print("-----------------------------------")
print("")
print("* PyTorch version: {}".format(torch.__version__))
print("* CUDA version: {}".format(torch.version.cuda))
print("* Distributed backend: {}".format(args.distributed_backend))
print("* Maximum bucket size: {}MB".format(args.bucket_size))
print("")
print("--- nvidia-smi topo -m ---")
print("")
print(output[0])
print("--------------------------")
print("")
torch.cuda.set_device(dist.get_rank() % 8)
device = torch.device('cuda:%d' % (dist.get_rank() % 8))
benchmarks = []
if args.model:
benchmarks.append(
TorchvisionBenchmark(
device=device,
distributed_backend=args.distributed_backend,
bucket_size=args.bucket_size,
model=args.model))
else:
for model in ["resnet50", "resnet101", "resnext50_32x4d", "resnext101_32x8d"]:
benchmarks.append(
TorchvisionBenchmark(
device=device,
distributed_backend=args.distributed_backend,
bucket_size=args.bucket_size,
model=model))
benchmark_results = []
for benchmark in benchmarks:
if args.rank == 0:
print("\nBenchmark: {}".format(str(benchmark)))
result = sweep(benchmark)
benchmark_results.append({
"model": benchmark.model,
"batch_size": benchmark.batch_size,
"result": result,
})
# Write file with benchmark results if applicable
if args.rank == 0 and args.json:
report = {
"pytorch_version": torch.__version__,
"cuda_version": torch.version.cuda,
"distributed_backend": args.distributed_backend,
"bucket_size": args.bucket_size,
"benchmark_results": benchmark_results,
}
with open(args.json, 'w') as f:
json.dump(report, f)
if __name__ == '__main__':
main()
| pytorch-master | benchmarks/distributed/ddp/benchmark.py |
#!/usr/bin/env python3
#
# Computes difference between measurements produced by ./benchmark.py.
#
import argparse
import json
import numpy as np
def load(path):
with open(path, 'r') as f:
return json.load(f)
def main():
parser = argparse.ArgumentParser(description='PyTorch distributed benchmark diff')
parser.add_argument("file", nargs=2)
args = parser.parse_args()
if len(args.file) != 2:
raise RuntimeError("Must specify 2 files to diff")
ja = load(args.file[0])
jb = load(args.file[1])
keys = (set(ja.keys()) | set(jb.keys())) - set(["benchmark_results"])
print("{:20s} {:>20s} {:>20s}".format("", "baseline", "test"))
print("{:20s} {:>20s} {:>20s}".format("", "-" * 20, "-" * 20))
for key in sorted(keys):
va = str(ja.get(key, "-"))
vb = str(jb.get(key, "-"))
print("{:20s} {:>20s} vs {:>20s}".format(key + ":", va, vb))
print("")
ba = ja["benchmark_results"]
bb = jb["benchmark_results"]
for ra, rb in zip(ba, bb):
if ra["model"] != rb["model"]:
continue
if ra["batch_size"] != rb["batch_size"]:
continue
model = ra["model"]
batch_size = int(ra["batch_size"])
name = "{} with batch size {}".format(model, batch_size)
print("Benchmark: {}".format(name))
# Print header
print("")
print("{:>10s}".format(""), end='') # noqa: E999
for _ in [75, 95]:
print("{:>16s}{:>10s}{:>10s}".format("sec/iter", "ex/sec", "diff"), end='') # noqa: E999
print("")
# Print measurements
for (i, (xa, xb)) in enumerate(zip(ra["result"], rb["result"])):
# Ignore round without ddp
if i == 0:
continue
# Sanity check: ignore if number of ranks is not equal
if len(xa["ranks"]) != len(xb["ranks"]):
continue
ngpus = len(xa["ranks"])
ma = sorted(xa["measurements"])
mb = sorted(xb["measurements"])
print("{:>4d} GPUs:".format(ngpus), end='') # noqa: E999
for p in [75, 95]:
va = np.percentile(ma, p)
vb = np.percentile(mb, p)
# We're measuring time, so lower is better (hence the negation)
delta = -100 * ((vb - va) / va)
print(" p{:02d}: {:8.3f}s {:7d}/s {:+8.1f}%".format(p, vb, int(batch_size / vb), delta), end='') # noqa: E999
print("")
print("")
if __name__ == '__main__':
main()
| pytorch-master | benchmarks/distributed/ddp/diff.py |
import functools
import torch
import torch.distributed as dist
import torch.nn as nn
class PythonDDP(nn.Module):
"""
    Python-only implementation of the DistributedDataParallel module.
    Unlike the production DistributedDataParallel, which relies on many C++ core
    utils to manage gradient distribution and reduction, this class implements
    everything in pure Python: param bucketing, gradient synchronization and
    reduction. The only C++ dependency is the common util ``dist.all_reduce``.
    The idea is to parallelize gradient calculation and reduction, using the same
    algorithm as https://pytorch.org/docs/stable/notes/ddp.html. Main steps:
    1. Distribute params into a list of buckets.
    2. Register a per-param hook to be invoked when the grad is ready during backward.
    3. In the hook, copy the grad to its corresponding bucket. If the bucket is full,
    kick off an async all_reduce operation to compute the average grad.
    4. After backward, wait for all async ops to be done, then copy the reduced grads
    back to their original places.
    Two modes are supported: asynchronous reduction (async_reduction=True) and
    synchronous reduction (async_reduction=False), which shares the same algorithm
    as LegacyDistributedDataParallel.
    As with DistributedDataParallel, a process group needs to be initialized before
    using this class.
Example::
>>> torch.distributed.init_process_group(
>>> backend='gloo', world_size=N, init_method='...'
>>> )
>>> pg = dist.distributed_c10d._get_default_group()
>>> async_reduction = True
>>> module = ToyModel()
>>> ddp_model = PythonDDP(module, pg, async_reduction)
>>> loss_fn = nn.MSELoss()
>>> optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)
>>> outputs = ddp_model(torch.randn(20, 10).to(rank))
>>> labels = torch.randn(20, 10).to(rank)
>>> loss_fn(outputs, labels).backward()
>>>
>>> # Reduce param grads
>>> ddp_model.all_reduce_grads()
>>> optimizer.step()
>>>
"""
class Bucket:
"""Bucket is a container for list of params. """
def __init__(self, max_buffer_size):
self.param_to_offset = {}
self.buffer = None
self.ready_param_grad_count = 0
self.total_elements = 0
self._MAX_BUFFER_SIZE = max_buffer_size
def __str__(self):
return "Bucket: num_params={}, total_elements={}, ready_param_grad_count={}".format(
len(self.param_to_offset),
self.total_elements,
self.ready_param_grad_count)
def is_full(self):
"""
            Returns whether the grads for all the params in the current bucket
            are ready and have been copied to self.buffer.
"""
assert self.ready_param_grad_count >= 0
assert self.ready_param_grad_count <= len(self.param_to_offset)
return len(self.param_to_offset) == self.ready_param_grad_count
def empty(self):
self.ready_param_grad_count = 0
def try_hold_param(self, param):
"""
            Checks whether the current bucket has enough buffer left to hold the
            incoming param. If there is enough space, distributes the param into
            the current bucket and returns True. Otherwise, returns False.
"""
            if self.total_elements + param.numel() <= self._MAX_BUFFER_SIZE:
self.param_to_offset[param] = self.total_elements
self.total_elements += param.numel()
return True
else:
return False
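    # Illustration of the bucketing logic above (hypothetical sizes): with
    # _MAX_BUFFER_SIZE=10, a bucket already holding a 6-element param accepts a
    # 4-element param (offsets 0 and 6, total_elements becomes 10) but rejects a
    # 5-element one, which _build_buckets_for_params then places in a new bucket.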
def __init__(self, module, process_group, async_reduction=True, buffer_size=2 ** 22):
super(PythonDDP, self).__init__()
self.module = module
self.process_group = process_group
self.world_size = dist.get_world_size(group=self.process_group)
self.async_reduction = async_reduction
# Holds all_reduce handles, used when async_reduction is True
self.async_handles = set()
# Ensure buffer_size is large enough to hold largest param.
max_numel = max(p.numel() for p in module.parameters())
assert buffer_size > max_numel, "buffer_size: {} should be larger than largest param: {}".format(buffer_size, max_numel)
# Build buckets for params
self.param_to_bucket, self.buckets = self._build_buckets_for_params(buffer_size)
# Register per-parameter hook to be invoked when grad is ready.
for p in self.module.parameters():
assert p.requires_grad
p.register_hook(functools.partial(self._on_param_grad_ready, p))
def _build_buckets_for_params(self, max_buffer_size):
"""
Distributes params into list of buckets. Maintains param -> bucket
mapping. Returns tuple of (param_to_buckets, buckets).
"""
print("_build_buckets_for_params called")
params_to_buckets = {}
buckets = set()
cur_bucket = self.Bucket(max_buffer_size)
total_param = 0
for param in self.module.parameters():
total_param += 1
assert param.requires_grad, "param.requires_grad must be True"
if cur_bucket.try_hold_param(param):
params_to_buckets[param] = cur_bucket
buckets.add(cur_bucket)
else:
new_bucket = self.Bucket(max_buffer_size)
                assert new_bucket.try_hold_param(param), "param must fit in an empty bucket"
params_to_buckets[param] = new_bucket
buckets.add(new_bucket)
cur_bucket = new_bucket
first_param = next(self.module.parameters())
for bucket in buckets:
bucket.buffer = first_param.new(bucket.total_elements)
assert bucket.buffer is not None, 'bucket.buffer should not be None'
print("len(param_to_bucket)={}, len(buckets)={}".format(
len(params_to_buckets), len(buckets)))
# Sanity check to ensure all params are distributed correctly into buckets
total_params_in_buckets = 0
for bucket in buckets:
total_params_in_buckets += len(bucket.param_to_offset)
assert total_param == total_params_in_buckets
return params_to_buckets, buckets
    # Callback invoked when the gradient for param is ready. Note that
    # param.grad is not set yet at this point, so we MUST use the ``grad``
    # argument passed to the callback.
def _on_param_grad_ready(self, param, grad):
"""
Callback when grad for param is ready. Copy grad to its corresponding
bucket. When the bucket is full, kickoff an async all_reduce if
async_reduction is set, and adds the resultant handle to
self.async_handles.
.. warning::
Note param.grad isn't set yet. Use the passed grad instead.
"""
# Validate bucket and offset are set.
bucket = self.param_to_bucket.get(param)
assert bucket is not None, "Failed to find bucket for param"
offset = bucket.param_to_offset.get(param)
assert offset is not None, "offset must be set for param"
assert bucket.buffer is not None, "buffer must be allocated"
# Copy grad to bucket, note param.grad isn't ready yet.
sz = param.numel()
assert grad is not None
assert param.requires_grad
assert param.numel() == grad.numel()
bucket.buffer[offset : offset + sz].copy_(grad.detach().view(-1))
bucket.ready_param_grad_count += 1
# Kickoff grad reduction async when bucket is full. This ensures grad
# reduction and other grad calculation runs in parallel.
if self.async_reduction and bucket.is_full():
bucket.buffer.div_(self.world_size)
handle = dist.all_reduce(
bucket.buffer, dist.ReduceOp.SUM, self.process_group, True)
self.async_handles.add(handle)
def forward(self, *inputs, **kwargs):
return self.module(*inputs, **kwargs)
def all_reduce_grads(self):
"""
        Reduces all gradients across workers and updates param gradients. The
        client should call this function after the backward pass.
        If async_reduction is True, waits for all async all_reduce handles;
        otherwise, kicks off a synchronous all_reduce for all buckets.
        Once all buckets are reduced, copies the reduced grads back to their
        original parameters. After that, resets all buckets in preparation for
        the next iteration.
"""
if self.async_reduction:
for handle in self.async_handles:
handle.wait()
self.async_handles.clear()
else:
for bucket in self.buckets:
assert bucket.is_full()
bucket.buffer.div_(self.world_size)
dist.all_reduce(bucket.buffer, dist.ReduceOp.SUM, self.process_group)
# Copy reduced-grad back into original place
for bucket in self.buckets:
assert bucket.is_full()
for cur_p, cur_offset in bucket.param_to_offset.items():
sz = cur_p.numel()
if cur_p.grad is not None:
with torch.no_grad():
cur_p.grad.copy_(bucket.buffer[cur_offset : cur_offset + sz].view_as(cur_p))
else:
cur_p.grad = bucket.buffer[cur_offset : cur_offset + sz].view_as(cur_p).clone()
        # Empty buckets for the next iteration
for bucket in self.buckets:
bucket.empty()
| pytorch-master | benchmarks/distributed/ddp/compare/python_ddp.py |
"""
A simple tool to compare the performance of different implementations of
DistributedDataParallel on resnet50, in three flavors:
1. DistributedDataParallel, which has a Python wrapper and C++ core to do
gradient distribution and reduction. It is the current production version.
2. PythonDDP with asynchronous gradient reduction.
3. PythonDDP with synchronous gradient reduction.
Example::
>>> modify configs in main func
>>> python compare_ddp.py
>>> Sample out: compare_ddp_sample.md
"""
import numpy as np
import os
import pickle
import glob
import python_ddp
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
from collections import OrderedDict
from enum import Enum
from tabulate import tabulate
from torch.nn.parallel import DistributedDataParallel as DDP
class DDPOption(Enum):
DDP_CPP_CORE = 1
PYTHON_DDP_SYNC_REDUCTION = 2
PYTHON_DDP_ASYNC_REDUCTION = 3
class LatencyData:
__slots__ = ["buffer_size_in_M", "ddp_option", "rank", "metrics"]
def __init__(self, buffer_size_in_M, ddp_option, rank, metrics):
self.buffer_size_in_M = buffer_size_in_M
self.ddp_option = ddp_option
self.rank = rank
self.metrics = metrics
def serialize(buffer_size_in_M, ddp_option, rank, metrics,
data_dir="./tmp", ext="ddpraw"):
if not os.path.exists(data_dir):
print(f'{data_dir} not exist, mkdir {data_dir}')
os.mkdir(data_dir)
file_name = "buffer_size_{}M_rank{}_{}.{}".format(
buffer_size_in_M, rank, ddp_option, ext)
file_path = os.path.join(data_dir, file_name)
print("Writing metrics to file: '{}'".format(file_path))
data = LatencyData(buffer_size_in_M, ddp_option, rank, metrics)
with open(file_path, "wb") as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
print(f"Wrote metrics to '{file_path}''")
def load_detailed_metrics(data_dir="./tmp", ext="ddpraw"):
assert os.path.exists(data_dir)
file_pattern = os.path.join(data_dir, f"*.{ext}")
files = glob.glob(file_pattern)
print("load_detailed_metrics found {} files".format(len(files)))
buffer_size_to_metrics = OrderedDict()
for file_path in files:
with open(file_path, "rb") as f:
data = pickle.load(f)
# Add data to buffer_size_to_metrics
buffer_size = data.buffer_size_in_M
if buffer_size not in buffer_size_to_metrics:
buffer_size_to_metrics[buffer_size] = {}
metrics = buffer_size_to_metrics.get(buffer_size)
assert metrics is not None
metrics[data.ddp_option] = data.metrics
return buffer_size_to_metrics
def setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=world_size)
def create_ddp_model(module, rank, pg, ddp_option, buffer_size_in_M):
"""Helper to create DDPModel. """
if ddp_option == DDPOption.DDP_CPP_CORE:
ddp_model = DDP(module, device_ids=[rank],
process_group=pg,
bucket_cap_mb=buffer_size_in_M)
ddp_model._set_static_graph()
return ddp_model
elif ddp_option == DDPOption.PYTHON_DDP_SYNC_REDUCTION:
M = 2 ** 20
return python_ddp.PythonDDP(module, pg, False, buffer_size=buffer_size_in_M * M)
elif ddp_option == DDPOption.PYTHON_DDP_ASYNC_REDUCTION:
M = 2 ** 20
return python_ddp.PythonDDP(module, pg, True, buffer_size=buffer_size_in_M * M)
else:
raise NotImplementedError
def run_ddp(rank, world_size, epochs, ddp_option, buffer_size_in_M, warmup_iterations=20):
print(f'Invoked run_ddp rank {rank}')
assert epochs > warmup_iterations
# Setup
print("setting up ... ")
setup(rank, world_size)
torch.manual_seed(rank)
torch.cuda.manual_seed(rank)
device = torch.device('cuda:%d' % rank)
print('setup done')
# Create ResNet50 module and wrap in DDP module.
pg = dist.distributed_c10d._get_default_group()
model = models.resnet50().to(device)
ddp_model = create_ddp_model(model, rank, pg, ddp_option, buffer_size_in_M)
assert ddp_model is not None
loss_fn = nn.MSELoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)
# Container to hold: event -> list of events in milliseconds
MODEL_FORWARD = "forward"
MODEL_BACKWARD = "backward"
metrics = {MODEL_FORWARD: [], MODEL_BACKWARD: []}
for epoch in range(epochs):
if epoch % 10 == 0:
print(f'Epoch {epoch}/{epochs} ...')
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
# TODO(bowangbj): Switch to real training set from ImageNet.
inputs = torch.rand([32, 3, 224, 224], device=device)
labels = torch.rand([32, 1000], device=device)
# Forward
start.record()
outputs = ddp_model(inputs)
loss = loss_fn(outputs, labels)
end.record()
torch.cuda.synchronize()
if epoch >= warmup_iterations:
metrics[MODEL_FORWARD].append(start.elapsed_time(end))
# Backward
start.record()
loss.backward()
        # Reduce all grads. This is needed for the non-DDP_CPP_CORE options,
        # which require an explicit all_reduce_grads() call after backward.
if ddp_option != DDPOption.DDP_CPP_CORE:
ddp_model.all_reduce_grads()
end.record()
torch.cuda.synchronize()
if epoch >= warmup_iterations:
metrics[MODEL_BACKWARD].append(start.elapsed_time(end))
# Optimization
optimizer.step()
optimizer.zero_grad()
if rank == 0:
print(f"\nMetrics for GPU {rank}, ddp_option={ddp_option}, buffer_size={buffer_size_in_M}M")
print(f"Skipped {warmup_iterations} CUDA warmpup iterations. ")
for step, elapsed_milliseconds in metrics.items():
A = np.array(elapsed_milliseconds)
print(' {N} iterations, {step}, mean={mean} ms, median={median} ms, p90={p90} ms, p99={p99} ms'.format(
N=len(A), step=step, mean=np.mean(A),
median=np.percentile(A, 50), p90=np.percentile(A, 90),
p99=np.percentile(A, 99)))
    # Serialize the raw data to be used to compute the summary. We don't
    # maintain a global object holding the metrics because mp.spawn copies all
    # the arguments before spawning a new process, which makes it infeasible
    # to accumulate global state in an object.
serialize(buffer_size_in_M, ddp_option, rank, metrics)
def append_delta(row_list, base, exp):
percent = 100 * ((exp - base) / base)
row_list.append(percent)
def print_summary(buffer_size_to_metrics):
# metrics: {ddp_option, Metrics}
# Metrics: step -> [latency]
for buffer_size, metrics in buffer_size_to_metrics.items():
assert DDPOption.DDP_CPP_CORE in metrics.keys()
baseline = metrics.get(DDPOption.DDP_CPP_CORE)
print(f"=== Summary for buffer_size: {buffer_size}M === ")
for step in baseline.keys():
# step takes value from [forward, backward]
            # compute latency for each step into a table, where each row looks like
            # [option, mean, delta, p50, delta, p90, delta, p95, delta, p99, delta]
data = []
baseline_latencies = baseline.get(step)
assert baseline_latencies is not None
A_baseline = np.array(baseline_latencies)
for ddp_option, exp_metrics in metrics.items():
exp_latencies = exp_metrics.get(step)
assert exp_latencies is not None
A_exp = np.array(exp_latencies)
# Yield option, mean, p50, p90, p95, p99 and delta.
row = [ddp_option]
row.append(np.mean(A_exp))
append_delta(row, np.mean(A_baseline), np.mean(A_exp))
for px in [50, 90, 95, 99]:
base = np.percentile(A_baseline, px)
exp = np.percentile(A_exp, px)
row.append(exp)
append_delta(row, base, exp)
data.append(row)
# Output buffer_size, step as a table.
            print(tabulate(data,
                           headers=[f"DDP: [{step}]", "mean", "delta%",
                                    "p50", "delta%", "p90", "delta%",
                                    "p95", "delta%", "p99", "delta%"]))
print("\n")
def main():
world_size = 2
epochs = 120
# resnet50 model facts:
# total_param_count = 161
# total_elements = 25557032 ~= 24.37M
# param_max_elements = 2359296 ~= 2.25M
# Try different bucket sizes.
buffer_size_in_mbs = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]
print("buffer_size_in_mbs: " + str(buffer_size_in_mbs))
for buffer_size_in_M in buffer_size_in_mbs:
print("\n\n=== NEW EXPERIMENT: buffer_size={}M, {} epochs, world_size={} ===".format(
buffer_size_in_M, epochs, world_size))
options = [
DDPOption.DDP_CPP_CORE,
DDPOption.PYTHON_DDP_ASYNC_REDUCTION,
DDPOption.PYTHON_DDP_SYNC_REDUCTION
]
for option in options:
print("Measuring option: {} ... ".format(option))
mp.spawn(run_ddp,
args=(world_size, epochs, option, buffer_size_in_M),
nprocs=world_size,
join=True)
print("\n Generating summaries ... ")
buffer_size_to_metrics = load_detailed_metrics(data_dir="./tmp", ext="ddpraw")
print_summary(buffer_size_to_metrics)
if __name__ == "__main__" :
main()
| pytorch-master | benchmarks/distributed/ddp/compare/compare_ddp.py |
import torch
RPC_SPARSE = "rpc_sparse"
RPC_DENSE = "rpc_dense"
def sparse_tensor_to_rpc_format(sparse_tensor):
r"""
    A helper function that creates a list containing the indices, values, and
    size of a coalesced sparse tensor.
    Args:
        sparse_tensor (torch.Tensor): the sparse_coo_tensor to convert
"""
sparse_tensor = sparse_tensor.coalesce()
return [sparse_tensor.indices(), sparse_tensor.values(), sparse_tensor.size()]
def sparse_rpc_format_to_tensor(sparse_rpc_format):
r"""
    A helper function that creates a sparse_coo_tensor from indices, values, and size.
Args:
sparse_rpc_format (list): sparse_coo_tensor represented as a list
"""
return torch.sparse_coo_tensor(
sparse_rpc_format[0], sparse_rpc_format[1], sparse_rpc_format[2]
).coalesce()
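# Illustrative sketch (not part of the benchmark itself): round-tripping a
# sparse tensor through the RPC-friendly list format produced by the two
# helpers above. The tensor values are made up for the example.
def _example_sparse_round_trip():
    indices = torch.tensor([[0, 1, 1], [2, 0, 2]])
    values = torch.tensor([3.0, 4.0, 5.0])
    sparse = torch.sparse_coo_tensor(indices, values, (2, 3))
    rpc_format = sparse_tensor_to_rpc_format(sparse)  # [indices, values, size]
    restored = sparse_rpc_format_to_tensor(rpc_format)
    assert torch.equal(sparse.to_dense(), restored.to_dense())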
def process_bucket_with_remote_server(state, bucket):
r"""
Processes a gradient bucket passed by a DDP communication hook
during .backward(). The method supports processing sparse and dense
tensors. It records RPC future completion time metric for the trainer.
Args:
state (object): maintains state during the training process
bucket (GradBucket): gradient bucket
"""
cref = state.cref
tensor = bucket.buffer()
if not cref.use_cuda_rpc:
tensor = tensor.cpu()
sparse = tensor.is_sparse
if sparse:
tensor = sparse_tensor_to_rpc_format(tensor)
b_index = bucket.get_index()
server_args = [
cref.server_rref,
state.batch_number,
b_index,
tensor
]
key = state.get_key(b_index)
cref.record_start(
"hook_future_metric",
key,
RPC_SPARSE if sparse else RPC_DENSE
)
fut = cref.server_rref.rpc_async().average_gradient(*server_args)
def callback(fut):
cref.record_end("hook_future_metric", key)
tensor = fut.wait()
if type(tensor) is list:
tensor = sparse_rpc_format_to_tensor(tensor)
tensor = tensor.cuda(cref.rank)
return [tensor]
return fut.then(callback)
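# Usage sketch (illustration only): this helper is designed to be returned
# directly from a DDP communication hook, e.g.
#
#     def my_rpc_hook(state, bucket):  # `my_rpc_hook` is a hypothetical name
#         return process_bucket_with_remote_server(state, bucket)
#
#     ddp_model.register_comm_hook(hook_state, my_rpc_hook)
#
# The `state` object is expected to expose `cref`, `batch_number` and
# `get_key`, as used above; see trainer/hooks.py for the hooks shipped with
# this benchmark.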
| pytorch-master | benchmarks/distributed/rpc/parameter_server/utils.py |
import argparse
import json
import os
from pathlib import Path
from data import data_map
from metrics.ProcessedMetricsPrinter import ProcessedMetricsPrinter
from models import model_map
from server import server_map
from trainer import (
criterion_map,
ddp_hook_map,
ddp_model_map,
hook_state_map,
iteration_step_map,
preprocess_data_map,
trainer_map,
)
import torch
import torch.distributed as c10d
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.futures import wait_all
from torch.utils.data import DataLoader
def get_name(rank, args):
r"""
A function that gets the name for the rank
argument
Args:
rank (int): process number in the world
args (parser): benchmark configurations
"""
t_count = args.ntrainer + args.ncudatrainer
s_count = args.nserver + args.ncudaserver
if rank < t_count:
return f"trainer{rank}"
elif rank < (t_count + s_count):
return f"server{rank}"
else:
return "master"
def get_server_rank(args, rank):
r"""
A function that gets the server rank for
the rank argument.
Args:
args (parser): benchmark configurations
rank (int): trainer rank
"""
s_offset = args.ntrainer + args.ncudatrainer
tps = args.ntrainer // args.nserver
return rank // tps + s_offset
def get_cuda_server_rank(args, rank):
r"""
A function that gets the cudaserver rank for
the rank argument.
Args:
args (parser): benchmark configurations
rank (int): trainer rank
"""
s_offset = args.ntrainer + args.ncudatrainer + args.nserver
t_index = rank - args.ntrainer
ctps = args.ncudatrainer // args.ncudaserver
return t_index // ctps + s_offset
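# Worked example of the rank layout assumed by the helpers above (the numbers
# are illustrative only). With ntrainer=4, ncudatrainer=2, nserver=2 and
# ncudaserver=1 the world size is 4 + 2 + 2 + 1 + 1 = 10:
#   ranks 0-3 -> CPU trainers, ranks 4-5 -> CUDA trainers,
#   ranks 6-7 -> CPU servers,  rank 8 -> CUDA server, rank 9 -> master.
# Then, for example:
#   get_name(5, args)             == "trainer5"
#   get_server_rank(args, 3)      == 3 // (4 // 2) + 6       == 7
#   get_cuda_server_rank(args, 5) == (5 - 4) // (2 // 1) + 8 == 8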
def get_server_rref(server_rank, args, extra_args):
r"""
A function that creates a RRef to the server.
Args:
server_rank (int): process number in the world
args (parser): benchmark configurations
extra_args (dict): configurations added by the user
"""
server = server_map[args.server]
name = get_name(
server_rank,
args
)
if extra_args is not None:
server_args = extra_args.values()
else:
server_args = []
if server_rank >= args.ntrainer + args.ncudatrainer + args.nserver:
trainer_count = args.ncudatrainer / args.ncudaserver
use_cuda_rpc = True
else:
trainer_count = args.ntrainer / args.nserver
use_cuda_rpc = False
return rpc.remote(
name,
server,
args=(
server_rank,
trainer_count,
use_cuda_rpc,
*server_args,
),
)
def run_trainer(
args, extra_args, data, rank, server_rref
):
r"""
    A function that obtains a trainer instance and calls
    its train method.
Args:
args (parser): benchmark configurations
extra_args (dict): configurations added by the user
data (list): training samples
rank (int): process number in the world
        server_rref (RRef): remote reference to the server
"""
trainer_class = trainer_map[args.trainer]
if extra_args is not None:
trainer_args = extra_args.values()
else:
trainer_args = []
trainer_count = args.ntrainer + args.ncudatrainer
store = c10d.FileStore(args.filestore, trainer_count)
if args.backend == "gloo":
process_group = c10d.ProcessGroupGloo(
store, rank, trainer_count
)
elif args.backend == "nccl":
process_group = c10d.ProcessGroupNCCL(
store, rank, trainer_count
)
elif args.backend == "multi":
process_group = c10d.ProcessGroupNCCL(
store, rank, trainer_count
)
if c10d.is_initialized() is False:
c10d.init_process_group(backend="gloo", rank=rank, world_size=trainer_count)
model = load_model(args)
preprocess_data = preprocess_data_map[args.preprocess_data]
create_criterion = criterion_map[args.create_criterion]
create_ddp_model = ddp_model_map[args.create_ddp_model]
iteration_step = iteration_step_map[args.iteration_step]
hook_state_class = hook_state_map[args.hook_state]
hook = ddp_hook_map[args.ddp_hook]
# check if this a cudatrainer
use_cuda_rpc = rank >= args.ntrainer
trainer = trainer_class(
process_group,
use_cuda_rpc,
server_rref,
args.backend,
args.epochs,
preprocess_data,
create_criterion,
create_ddp_model,
hook_state_class,
hook,
iteration_step,
*trainer_args
)
trainer.train(model, data)
metrics = trainer.get_metrics()
return [rank, metrics]
def call_trainers(args, extra_args, train_data, server_rrefs):
r"""
A function that starts the trainers. Each trainer is started
using an rpc_async request.
Args:
args (parser): benchmark configurations
extra_args (dict): configurations added by the user
train_data (list): training samples
server_rrefs (dict): a dictionary containing server RRefs
"""
futs = []
for trainer_rank in range(0, args.ntrainer + args.ncudatrainer):
trainer_name = get_name(
trainer_rank,
args
)
server_rref = None
if server_rrefs:
if trainer_rank >= args.ntrainer:
server_rank = get_cuda_server_rank(args, trainer_rank)
else:
server_rank = get_server_rank(args, trainer_rank)
server_rref = server_rrefs[server_rank]
fut = rpc.rpc_async(
trainer_name,
run_trainer,
args=(
args,
extra_args,
train_data[trainer_rank],
trainer_rank,
server_rref,
),
timeout=args.rpc_timeout
)
futs.append(fut)
return futs
def benchmark_warmup(
args, extra_args, data, server_rrefs
):
r"""
A function that runs the training algorithm. The goal of this
    function is to warm up RPC. The server states are reset afterwards.
Args:
args (parser): benchmark configurations
extra_args (dict): configurations added by the user
data (list): training samples
server_rrefs (dict): a dictionary containing server RRefs
"""
futs = call_trainers(args, extra_args, data, server_rrefs)
wait_all(futs)
for server_rref in server_rrefs.values():
server_rref.rpc_sync().reset_state(server_rref)
print("benchmark warmup done\n")
def split_list(arr, n):
r"""
A function that splits a list into n lists
Args:
arr (list): training samples
n (int): number of output lists
"""
return [arr[i::n] for i in range(n)]
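# For example (illustration only): split_list(list(range(6)), 2) returns
# [[0, 2, 4], [1, 3, 5]] -- batches are dealt round-robin, so each trainer
# receives every n-th batch rather than a contiguous slice.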
def get_server_metrics(server_rrefs):
r"""
A function that calls the remote server to obtain metrics
collected during the benchmark run.
Args:
server_rrefs (dict): a dictionary containing server RRefs
"""
rank_metrics = []
for rank, server_rref in server_rrefs.items():
metrics = server_rref.rpc_sync().get_metrics(server_rref)
rank_metrics.append([rank, metrics])
return rank_metrics
def run_master(rank, data, args, extra_configs, rpc_backend_options):
r"""
A function that runs the master process in the world. This function
obtains remote references to initialized servers, splits the data,
runs the trainers, and prints metrics.
Args:
rank (int): process number in the world
data (list): training samples
args (parser): benchmark configurations
extra_configs (dict): configurations added by the user
        rpc_backend_options (TensorPipeRpcBackendOptions): options for the RPC agent
"""
world_size = args.ntrainer + args.ncudatrainer + args.nserver + args.ncudaserver + 1
rpc.init_rpc(
get_name(
rank,
args
),
rank=rank,
world_size=world_size,
rpc_backend_options=rpc_backend_options
)
server_rrefs = {}
for i in range(
args.ntrainer + args.ncudatrainer, world_size - 1
):
server_rrefs[i] = get_server_rref(i, args, extra_configs["server_config"])
train_data = split_list(
list(DataLoader(data, batch_size=args.batch_size)),
args.ntrainer + args.ncudatrainer
)
# warmup run the benchmark
benchmark_warmup(
args, extra_configs["trainer_config"], train_data, server_rrefs
)
# run the benchmark
trainer_futs = call_trainers(
args, extra_configs["trainer_config"], train_data, server_rrefs
)
# collect metrics and print
metrics_printer = ProcessedMetricsPrinter()
rank_metrics_list = wait_all(trainer_futs)
metrics_printer.print_metrics("trainer", rank_metrics_list)
rank_metrics_list = get_server_metrics(server_rrefs)
metrics_printer.print_metrics("server", rank_metrics_list)
def run_benchmark(rank, args, data):
r"""
A function that runs the benchmark.
Args:
rank (int): process number in the world
args (parser): configuration args
data (list): training samples
"""
config = load_extra_configs(args)
torch.manual_seed(args.torch_seed)
torch.cuda.manual_seed_all(args.cuda_seed)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
world_size = args.ntrainer + args.ncudatrainer + args.nserver + args.ncudaserver + 1
os.environ['MASTER_ADDR'] = args.master_addr
os.environ['MASTER_PORT'] = args.master_port
rpc_backend_options = TensorPipeRpcBackendOptions(rpc_timeout=args.rpc_timeout)
if rank == world_size - 1:
# master = [ntrainer + ncudatrainer + nserver + ncudaserver, ntrainer + ncudatrainer + nserver + ncudaserver]
run_master(rank, data, args, config, rpc_backend_options)
elif rank >= args.ntrainer + args.ncudatrainer:
# parameter_servers = [ntrainer + ncudatrainer, ntrainer + ncudatrainer + nserver + ncudaserver)
rpc.init_rpc(
get_name(
rank,
args
),
rank=rank,
world_size=world_size,
rpc_backend_options=rpc_backend_options
)
else:
# trainers = [0, ntrainer + ncudatrainer)
if rank >= args.ntrainer:
server_rank = get_cuda_server_rank(args, rank)
server_name = get_name(server_rank, args)
rpc_backend_options.set_device_map(
server_name,
{rank: server_rank}
)
trainer_name = get_name(
rank,
args
)
rpc.init_rpc(
trainer_name,
rank=rank,
world_size=world_size,
rpc_backend_options=rpc_backend_options
)
rpc.shutdown()
def get_json_config(file_name, id):
r"""
A function that loads a json configuration from a file.
Args:
file_name (str): name of configuration file to load
id (str): configuration that will be loaded
"""
with open(os.path.join(Path(__file__).parent, file_name), "r") as f:
json_config = json.load(f)[id]
return json_config
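# Hedged sketch of the configuration file layout get_json_config expects; the
# top-level id and the values below are placeholders, while the keys mirror
# the DummyData constructor defined in data/DummyData.py. The "configurations"
# dict is splatted into the mapped class constructor (see load_data and
# load_model below).
#
#     {
#         "DummyData": {
#             "data_class": "DummyData",
#             "configurations": {
#                 "max_val": 100,
#                 "sample_count": 100,
#                 "sample_length": 4,
#                 "sparsity_percentage": 80
#             }
#         }
#     }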
def load_extra_configs(args):
r"""
A function that creates a dictionary that contains any extra configurations
set by the user. The dictionary will contain two keys trainer_config and
server_config, with default values None.
Args:
args (parser): launcher configurations
"""
trainer_config_file = args.trainer_config_path
server_config_file = args.server_config_path
configurations = {
"trainer_config": None,
"server_config": None
}
if args.trainer is not None and trainer_config_file is not None:
configurations["trainer_config"] = get_json_config(trainer_config_file, args.trainer)
if args.server is not None and server_config_file is not None:
configurations["server_config"] = get_json_config(server_config_file, args.server)
return configurations
def load_data(args):
r"""
A function that creates an instance of the data class.
Args:
args (parser): launcher configurations
"""
data_config_file = args.data_config_path
data_config = get_json_config(data_config_file, args.data)
data_class = data_map[data_config["data_class"]]
return data_class(**data_config["configurations"])
def load_model(args):
r"""
A function that creates an instance of the model class.
Args:
args (parser): launcher configurations
"""
model_config_file = args.model_config_path
model_config = get_json_config(model_config_file, args.model)
model_class = model_map[model_config["model_class"]]
return model_class(**model_config["configurations"])
def main(args):
r"""
A function that creates multiple processes to run the benchmark.
Args:
args (parser): launcher configurations
"""
# CPU and RPC trainer checks
if args.ntrainer > 0 and args.ncudatrainer > 0:
assert args.nserver > 0 and args.ncudaserver > 0
if args.nserver > 0:
assert args.ntrainer > 0
assert args.ntrainer % args.nserver == 0
if args.ncudaserver > 0:
assert args.ncudatrainer > 0
assert args.ncudatrainer % args.ncudaserver == 0
world_size = (
args.ntrainer + args.ncudatrainer + args.nserver + args.ncudaserver + 1
)
data = load_data(args)
mp.spawn(
run_benchmark,
args=(
args,
data,
),
nprocs=world_size,
join=True
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="RPC server Benchmark")
parser.add_argument(
"--master_addr",
type=str,
help="IP address of the machine that will host the process with rank 0"
)
parser.add_argument(
"--master_port",
type=str,
help="A free port on the machine that will host the process with rank 0"
)
parser.add_argument(
"--trainer",
type=str,
help="trainer map key to get trainer class for benchmark run"
)
parser.add_argument(
"--ntrainer",
type=int,
help="trainer count for benchmark run"
)
parser.add_argument(
"--ncudatrainer",
type=int,
help="cudatrainer count for benchmark run"
)
parser.add_argument(
"--filestore",
type=str,
help="filestore location for process group"
)
parser.add_argument(
"--server",
type=str,
help="server map key to get trainer class for benchmark run"
)
parser.add_argument(
"--nserver",
type=int,
help="server count for benchmark run"
)
parser.add_argument(
"--ncudaserver",
type=int,
help="cudaserver count for benchmark run"
)
parser.add_argument(
"--rpc_timeout",
type=int,
help="timeout in seconds to use for RPC"
)
parser.add_argument(
"--backend",
type=str,
help="distributed communication backend to use for benchmark run"
)
parser.add_argument(
"--epochs",
type=int,
help="epoch count for training"
)
parser.add_argument(
"--batch_size",
type=int,
help="number of training examples used in one iteration"
)
parser.add_argument(
"--data",
type=str,
help="id for data configuration"
)
parser.add_argument(
"--model",
type=str,
help="id for model configuration"
)
parser.add_argument(
"--data_config_path",
type=str,
help="path to data configuration file"
)
parser.add_argument(
"--model_config_path",
type=str,
help="path to model configuration file"
)
parser.add_argument(
"--server_config_path",
type=str,
help="path to server configuration file"
)
parser.add_argument(
"--trainer_config_path",
type=str,
help="path to trainer configuration file"
)
parser.add_argument(
"--torch_seed",
type=int,
help="seed for generating random numbers to a non-deterministic random number"
)
parser.add_argument(
"--cuda_seed",
type=int,
help="seed for generating random numbers to a random number for the current GPU"
)
parser.add_argument(
"--preprocess_data",
type=str,
help="this function will be used to preprocess data before training"
)
parser.add_argument(
"--create_criterion",
type=str,
help="this function will be used to create the criterion used for model loss calculation"
)
parser.add_argument(
"--create_ddp_model",
type=str,
help="this function will be used to create the ddp model used during training"
)
parser.add_argument(
"--hook_state",
type=str,
help="this will be the state class used when registering the ddp communication hook"
)
parser.add_argument(
"--ddp_hook",
type=str,
default="allreduce_hook",
help="ddp communication hook"
)
parser.add_argument(
"--iteration_step",
type=str,
help="this will be the function called for each iteration of training"
)
args = parser.parse_args()
print(f"{args}\n")
main(args)
| pytorch-master | benchmarks/distributed/rpc/parameter_server/launcher.py |
import statistics
import pandas as pd
from tabulate import tabulate
class ProcessedMetricsPrinter:
def print_data_frame(self, name, processed_metrics):
print(f"metrics for {name}")
data_frame = self.get_data_frame(processed_metrics)
print(tabulate(data_frame, showindex=False, headers=data_frame.columns, tablefmt="grid"))
def combine_processed_metrics(self, processed_metrics_list):
r"""
A method that merges the value arrays of the keys in the dictionary
of processed metrics.
Args:
processed_metrics_list (list): a list containing dictionaries with
recorded metrics as keys, and the values are lists of elapsed times.
Returns::
A merged dictionary that is created from the list of dictionaries passed
into the method.
Examples::
>>> instance = ProcessedMetricsPrinter()
>>> dict_1 = trainer1.get_processed_metrics()
>>> dict_2 = trainer2.get_processed_metrics()
>>> print(dict_1)
{
"forward_metric_type,forward_pass" : [.0429, .0888]
}
>>> print(dict_2)
{
"forward_metric_type,forward_pass" : [.0111, .0222]
}
>>> processed_metrics_list = [dict_1, dict_2]
>>> result = instance.combine_processed_metrics(processed_metrics_list)
>>> print(result)
{
"forward_metric_type,forward_pass" : [.0429, .0888, .0111, .0222]
}
"""
processed_metric_totals = {}
for processed_metrics in processed_metrics_list:
for metric_name, values in processed_metrics.items():
if metric_name not in processed_metric_totals:
processed_metric_totals[metric_name] = []
processed_metric_totals[metric_name] += values
return processed_metric_totals
def get_data_frame(self, processed_metrics):
df = pd.DataFrame(
columns=['name', 'min', 'max', 'mean', 'variance', 'stdev']
)
for metric_name in sorted(processed_metrics.keys()):
values = processed_metrics[metric_name]
row = {
"name": metric_name,
"min": min(values),
"max": max(values),
"mean": statistics.mean(values),
"variance": statistics.variance(values),
"stdev": statistics.stdev(values)
}
df = df.append(row, ignore_index=True)
return df
def print_metrics(self, name, rank_metrics_list):
if rank_metrics_list:
metrics_list = []
for rank, metric in rank_metrics_list:
self.print_data_frame(f"{name}={rank}", metric)
metrics_list.append(metric)
combined_metrics = self.combine_processed_metrics(metrics_list)
self.print_data_frame(f"all {name}", combined_metrics)
def save_to_file(self, data_frame, file_name):
file_name = f"data_frames/{file_name}.csv"
data_frame.to_csv(file_name, encoding='utf-8', index=False)
| pytorch-master | benchmarks/distributed/rpc/parameter_server/metrics/ProcessedMetricsPrinter.py |
from .CPUMetric import CPUMetric
from .CUDAMetric import CUDAMetric
class MetricsLogger:
def __init__(self, rank=None):
self.rank = rank
self.metrics = {}
def record_start(self, type, key, name, cuda):
if type in self.metrics and key in self.metrics[type]:
raise RuntimeError(f"metric_type={type} with key={key} already exists")
if cuda:
if self.rank is None:
raise RuntimeError("rank is required for cuda")
metric = CUDAMetric(self.rank, name)
else:
metric = CPUMetric(name)
if type not in self.metrics:
self.metrics[type] = {}
self.metrics[type][key] = metric
metric.record_start()
def record_end(self, type, key):
if type not in self.metrics or key not in self.metrics[type]:
raise RuntimeError(f"metric_type={type} with key={key} not found")
if self.metrics[type][key].get_end() is not None:
raise RuntimeError(f"end for metric_type={type} with key={key} already exists")
self.metrics[type][key].record_end()
def clear_metrics(self):
self.metrics.clear()
def get_metrics(self):
return self.metrics
def get_processed_metrics(self):
r"""
A method that processes the metrics recorded during the benchmark.
Returns::
It returns a dictionary containing keys as the metrics
and values list of elapsed times.
Examples::
>>> instance = MetricsLogger(rank)
        >>> instance.record_start("forward_metric_type", "1", "forward_pass", cuda=True)
        >>> instance.record_end("forward_metric_type", "1")
        >>> instance.record_start("forward_metric_type", "2", "forward_pass", cuda=True)
        >>> instance.record_end("forward_metric_type", "2")
>>> print(instance.metrics)
{
"forward_metric_type": {
"1": metric1,
"2": metric2
}
}
>>> print(instance.get_processed_metrics())
{
"forward_metric_type,forward_pass" : [.0429, .0888]
}
"""
processed_metrics = {}
for metric_type in self.metrics.keys():
for metric_key in self.metrics[metric_type].keys():
metric = self.metrics[metric_type][metric_key]
if isinstance(metric, CUDAMetric):
metric.synchronize()
metric_name = metric.get_name()
elapsed_time = metric.elapsed_time()
processed_metric_name = f"{metric_type},{metric_name}"
if processed_metric_name not in processed_metrics:
processed_metrics[processed_metric_name] = []
processed_metrics[processed_metric_name].append(elapsed_time)
return processed_metrics
| pytorch-master | benchmarks/distributed/rpc/parameter_server/metrics/MetricsLogger.py |
import time
from .MetricBase import MetricBase
class CPUMetric(MetricBase):
def __init__(self, name: str):
self.name = name
self.start = None
self.end = None
def record_start(self):
self.start = time.time()
def record_end(self):
self.end = time.time()
def elapsed_time(self):
if self.start is None:
raise RuntimeError("start is None")
if self.end is None:
raise RuntimeError("end is None")
return self.end - self.start
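# Minimal usage sketch (illustration only): elapsed_time here is wall-clock
# seconds, unlike CUDAMetric which reports milliseconds from CUDA events.
def _example_cpu_metric():
    metric = CPUMetric("forward_pass")
    metric.record_start()
    sum(range(1000))  # stand-in for real work
    metric.record_end()
    return metric.elapsed_time()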
| pytorch-master | benchmarks/distributed/rpc/parameter_server/metrics/CPUMetric.py |
from abc import ABC, abstractmethod
class MetricBase(ABC):
def __init__(self, name):
self.name = name
self.start = None
self.end = None
@abstractmethod
def record_start(self):
return
@abstractmethod
def record_end(self):
return
@abstractmethod
def elapsed_time(self):
return
def get_name(self):
return self.name
def get_end(self):
return self.end
| pytorch-master | benchmarks/distributed/rpc/parameter_server/metrics/MetricBase.py |
import torch
from .MetricBase import MetricBase
class CUDAMetric(MetricBase):
def __init__(self, rank: int, name: str):
self.rank = rank
self.name = name
self.start = None
self.end = None
def record_start(self):
self.start = torch.cuda.Event(enable_timing=True)
with torch.cuda.device(self.rank):
self.start.record()
def record_end(self):
self.end = torch.cuda.Event(enable_timing=True)
with torch.cuda.device(self.rank):
self.end.record()
def elapsed_time(self):
if not self.start.query():
raise RuntimeError("start event did not complete")
if not self.end.query():
raise RuntimeError("end event did not complete")
return self.start.elapsed_time(self.end)
def synchronize(self):
self.start.synchronize()
self.end.synchronize()
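# Minimal usage sketch (illustration only, assumes a visible GPU): CUDA events
# record asynchronously, so synchronize() must be called before elapsed_time(),
# which returns milliseconds.
def _example_cuda_metric(rank=0):
    metric = CUDAMetric(rank, "matmul")
    metric.record_start()
    a = torch.randn(1024, 1024, device=rank)
    a.mm(a)  # stand-in for real work
    metric.record_end()
    metric.synchronize()
    return metric.elapsed_time()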
| pytorch-master | benchmarks/distributed/rpc/parameter_server/metrics/CUDAMetric.py |
import functools
import threading
import time
from abc import ABC, abstractmethod
from metrics.MetricsLogger import MetricsLogger
from utils import sparse_rpc_format_to_tensor, sparse_tensor_to_rpc_format
import torch
import torch.distributed.rpc as rpc
class ParameterServerBase(ABC):
PARAMETER_SERVER_BATCH_METRIC = "parameter_server_batch_metric"
PARAMETER_SERVER_STRAGGLER_METRIC = "parameter_server_straggler_metric"
PARAM_INDEX_STRAGGLER = "param_index_straggler"
PARAM_INDEX_BATCH = "param_index_batch"
def __init__(self, rank):
r"""
Inits ParameterServerBase class.
Args:
rank (int): worker rank
"""
self.__metrics_logger = MetricsLogger(rank)
@abstractmethod
def process_gradient(self):
r"""
A method to be implemented by child class that will process a
gradient received by a server.
"""
return
@staticmethod
@abstractmethod
def average_gradient():
r"""
A method to be implemented by child class that will average
gradients.
"""
return
@staticmethod
@abstractmethod
def reset_state():
r"""
A method to be implemented by child class that will reset
the server state.
"""
return
def record_start(self, type, key, name, cuda=True):
r"""
A method that records the start event for a metric.
Args:
type (str): group id for metric
key (str): unique id for metric within a group
name (str): description of the metric
cuda (bool): indicator to determine if this is a CUDA metric
"""
self.__metrics_logger.record_start(
type,
key,
name,
cuda
)
def record_end(self, type, key):
r"""
A method that records the end event for a metric
Args:
type (str): group id for metric
key (str): unique id for metric within a group
"""
self.__metrics_logger.record_end(
type,
key
)
def record_straggler_start(self, key, cuda=True):
r"""
A helper method that records a straggler metric
for the given key. A user should call this when
the first gradient for the param location is received.
Args:
key (str): unique id for metric within a group
cuda (bool): indicator to determine if this is a CUDA metric
"""
self.__metrics_logger.record_start(
self.PARAMETER_SERVER_STRAGGLER_METRIC,
key,
self.PARAM_INDEX_STRAGGLER,
cuda
)
def record_straggler_end(self, key):
r"""
A helper method that records a straggler metric
for the given key. A user should call this when
the last gradient for the param location is received.
Args:
key (str): unique id for metric within a group
"""
self.__metrics_logger.record_end(
self.PARAMETER_SERVER_STRAGGLER_METRIC,
key
)
def record_batch_start(self, key, cuda=True):
r"""
A helper method that records a batch metric
for the given key. A user should call this when
the first gradient for the param location is received.
Args:
key (str): unique id for metric within a group
cuda (bool): indicator to determine if this is a CUDA metric
"""
self.__metrics_logger.record_start(
self.PARAMETER_SERVER_BATCH_METRIC,
key,
self.PARAM_INDEX_BATCH,
cuda
)
def record_batch_end(self, key):
r"""
A helper method that records a batch metric
for the given key. A user should call this when
all futures for a param location have had their
result set.
Args:
key (str): unique id for metric within a group
"""
self.__metrics_logger.record_end(
self.PARAMETER_SERVER_BATCH_METRIC,
key
)
@staticmethod
def record_method(name, type="method_metric", cuda=True):
r"""
A decorator that records a metric for the decorated method.
Args:
name (str): description of the metric
type (str): group id for metric
cuda (bool): indicator to determine if this is a CUDA metric
"""
def decorator(function):
@functools.wraps(function)
def wrapper(self, *args):
key = time.time()
self.__metrics_logger.record_start(type, key, name, cuda)
result = function(self, *args)
self.__metrics_logger.record_end(type, key)
return result
return wrapper
return decorator
@staticmethod
def get_metrics(server_rref):
r"""
A staticmethod that returns metrics captured by the __metrics_logger.
Args:
server_rref (RRef): remote reference to the server
"""
self = server_rref.local_value()
return self.__metrics_logger.get_processed_metrics()
def clear_metrics(self):
r"""
A method that clears __metrics_logger recorded metrics.
"""
return self.__metrics_logger.clear_metrics()
class AverageParameterServer(ParameterServerBase):
def __init__(
self,
rank,
trainer_count,
use_cuda_rpc
):
r"""
A parameter server that averages the gradients
from trainers for each training iteration step.
Gradients are added as they are received from trainers.
When all gradients have been received, the sum is
divided by the number of trainers.
Args:
rank (int): worker rank
trainer_count (int): count of trainers sending
gradients to the server
use_cuda_rpc (bool): indicator for CUDA RPC
"""
super().__init__(rank)
self.lock = threading.Lock()
self.rank = rank
self.trainer_count = trainer_count
self.use_cuda_rpc = use_cuda_rpc
self.batch_number = 0
self.futures = {}
self.gradient_dict = {}
@staticmethod
def reset_state(server_rref):
r"""
A method that clears the state of the server.
Args:
server_rref (RRef): remote reference to the server
"""
self = server_rref.local_value()
self.batch_number = 0
self.futures.clear()
self.gradient_dict.clear()
self.clear_metrics()
def param_key(self, param_loc):
r"""
A method that returns an encoded key that represents
the current batch and param location.
Args:
param_loc (int): bucket location sent by the trainer
containing the gradient
"""
return f"{self.batch_number},{param_loc}"
def clear_batch_state(self):
r"""
Clears the current server batch state.
"""
self.futures.clear()
self.gradient_dict.clear()
def process_gradient(self, gradient, param_loc):
r"""
Stores the gradient if param_loc is not in gradient_dict.
Adds the gradient to param_loc if it is in gradient_dict.
Args:
gradient (torch.Tensor): tensor sent from trainer
param_loc (int): bucket location sent by the trainer
containing the gradient
"""
if param_loc not in self.gradient_dict:
self.record_straggler_start(self.param_key(param_loc))
self.record_batch_start(self.param_key(param_loc))
self.gradient_dict[param_loc] = gradient
else:
self.gradient_dict[param_loc] += gradient
@ParameterServerBase.record_method(name="average computation")
def average(self, param_loc):
r"""
Obtains the tensor at the param_loc in the gradient_dict
and then divides by number of trainers.
Args:
param_loc (int): bucket location sent by the trainer
containing the gradient
"""
param_loc_avg = self.gradient_dict[param_loc]
        param_loc_avg = param_loc_avg / (1.0 * self.trainer_count)
return param_loc_avg
@staticmethod
@rpc.functions.async_execution
def average_gradient(
server_rref,
received_batch_number,
param_loc,
gradient
):
r"""
An asynchronous function that will average gradients
sent from trainers.
Args:
server_rref (RRef): remote reference to the server
received_batch_number (int): batch number sent by
the trainer
param_loc (int): bucket location sent by the trainer
containing the gradient
gradient (torch.Tensor or list): tensor sent by the trainer
"""
self = server_rref.local_value()
if type(gradient) is list:
gradient = sparse_rpc_format_to_tensor(gradient)
gradient = gradient.cuda(self.rank)
fut = torch.futures.Future()
with self.lock:
if self.batch_number < received_batch_number:
self.batch_number = received_batch_number
self.clear_batch_state()
self.process_gradient(gradient, param_loc)
if param_loc not in self.futures:
self.futures[param_loc] = []
self.futures[param_loc].append(fut)
if len(self.futures[param_loc]) == self.trainer_count:
self.record_straggler_end(self.param_key(param_loc))
param_loc_avg = self.average(param_loc)
if not self.use_cuda_rpc:
param_loc_avg = param_loc_avg.cpu()
if param_loc_avg.is_sparse:
param_loc_avg = sparse_tensor_to_rpc_format(param_loc_avg)
for cur_fut in self.futures[param_loc]:
cur_fut.set_result(param_loc_avg)
self.record_batch_end(self.param_key(param_loc))
return fut
class AverageBatchParameterServer(AverageParameterServer):
def __init__(
self,
rank,
trainer_count,
use_cuda_rpc
):
r"""
A parameter server that averages the gradients
from trainers for each training iteration step.
Gradients are stored and averaged when a gradient
has been received from each trainer for a param
location.
Args:
rank (int): worker rank
trainer_count (int): count of trainers sending
gradients to the server
use_cuda_rpc (bool): indicator for CUDA RPC
"""
super().__init__(rank, trainer_count, use_cuda_rpc)
def process_gradient(self, gradient, param_loc):
r"""
Adds the gradient to param_loc bucket stored in
the gradient_dict.
Args:
gradient (torch.Tensor): tensor sent from trainer
param_loc (int): bucket location sent by the trainer
containing the gradient
"""
if param_loc not in self.gradient_dict:
self.record_straggler_start(self.param_key(param_loc))
self.record_batch_start(self.param_key(param_loc))
self.gradient_dict[param_loc] = []
self.gradient_dict[param_loc].append(gradient)
@ParameterServerBase.record_method(name="average computation")
def average(self, param_loc):
r"""
Sums the gradients at the param_loc then divides by the
number of trainers.
Args:
param_loc (int): bucket location sent by the trainer
containing the gradient
"""
param_loc_avg = self.gradient_dict[param_loc][0]
for gradient in self.gradient_dict[param_loc][1:]:
param_loc_avg += gradient
        param_loc_avg = param_loc_avg / (1.0 * self.trainer_count)
return param_loc_avg
| pytorch-master | benchmarks/distributed/rpc/parameter_server/server/server.py |
from .server import AverageBatchParameterServer, AverageParameterServer
server_map = {
"AverageParameterServer": AverageParameterServer,
"AverageBatchParameterServer": AverageBatchParameterServer
}
| pytorch-master | benchmarks/distributed/rpc/parameter_server/server/__init__.py |
from .DummyModel import DummyModel
model_map = {
"DummyModel": DummyModel
}
| pytorch-master | benchmarks/distributed/rpc/parameter_server/models/__init__.py |
import torch.nn as nn
import torch.nn.functional as F
class DummyModel(nn.Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
dense_input_size: int,
dense_output_size: int,
dense_layers_count: int,
sparse: bool
):
r"""
A dummy model with an EmbeddingBag Layer and Dense Layer.
Args:
num_embeddings (int): size of the dictionary of embeddings
embedding_dim (int): the size of each embedding vector
dense_input_size (int): size of each input sample
dense_output_size (int): size of each output sample
dense_layers_count: (int): number of dense layers in dense Sequential module
sparse (bool): if True, gradient w.r.t. weight matrix will be a sparse tensor
"""
super().__init__()
self.embedding = nn.EmbeddingBag(
num_embeddings, embedding_dim, sparse=sparse
)
self.dense = nn.Sequential(*[nn.Linear(dense_input_size, dense_output_size) for _ in range(dense_layers_count)])
def forward(self, x):
x = self.embedding(x)
return F.softmax(self.dense(x), dim=1)
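# Shape sketch (illustration only): the forward pass expects a LongTensor of
# embedding indices with shape (batch_size, sample_length) and returns softmax
# scores with shape (batch_size, dense_output_size). Note that embedding_dim
# must match dense_input_size, and stacking more than one dense layer requires
# dense_output_size == dense_input_size.
def _example_forward():
    import torch
    model = DummyModel(
        num_embeddings=100, embedding_dim=8,
        dense_input_size=8, dense_output_size=8,
        dense_layers_count=2, sparse=True,
    )
    batch = torch.randint(0, 100, (4, 3))
    return model(batch)  # shape: (4, 8)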
| pytorch-master | benchmarks/distributed/rpc/parameter_server/models/DummyModel.py |
import random
import numpy as np
import torch
from torch.utils.data import Dataset
class DummyData(Dataset):
def __init__(
self,
max_val: int,
sample_count: int,
sample_length: int,
sparsity_percentage: int
):
r"""
A data class that generates random data.
Args:
max_val (int): the maximum value for an element
sample_count (int): count of training samples
sample_length (int): number of elements in a sample
            sparsity_percentage (int): the percentage of the embedding table
                that is left unused by the generated input in each iteration
"""
self.max_val = max_val
self.input_samples = sample_count
self.input_dim = sample_length
self.sparsity_percentage = sparsity_percentage
def generate_input():
            percentage_of_elements = (100 - self.sparsity_percentage) / float(100)
            index_count = int(self.max_val * percentage_of_elements)
elements = list(range(self.max_val))
random.shuffle(elements)
elements = elements[:index_count]
data = [
[
elements[random.randint(0, index_count - 1)]
for _ in range(self.input_dim)
]
for _ in range(self.input_samples)
]
return torch.from_numpy(np.array(data))
self.input = generate_input()
self.target = torch.randint(0, max_val, [sample_count])
def __len__(self):
return len(self.input)
def __getitem__(self, index):
return self.input[index], self.target[index]
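# Usage sketch (illustration only), mirroring how the launcher feeds this
# dataset to trainers through a DataLoader:
#
#     data = DummyData(max_val=100, sample_count=64, sample_length=4,
#                      sparsity_percentage=80)
#     batches = list(DataLoader(data, batch_size=8))
#     inputs, targets = batches[0]  # shapes: (8, 4) indices and (8,) targets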
| pytorch-master | benchmarks/distributed/rpc/parameter_server/data/DummyData.py |
from .DummyData import DummyData
data_map = {
"DummyData": DummyData
}
| pytorch-master | benchmarks/distributed/rpc/parameter_server/data/__init__.py |
def preprocess_dummy_data(rank, data):
r"""
A function that moves the data from CPU to GPU
for DummyData class.
Args:
rank (int): worker rank
data (list): training examples
"""
for i in range(len(data)):
data[i][0] = data[i][0].cuda(rank)
data[i][1] = data[i][1].cuda(rank)
return data
| pytorch-master | benchmarks/distributed/rpc/parameter_server/trainer/preprocess_data.py |
from utils import process_bucket_with_remote_server
import torch
import torch.distributed as c10d
def allreduce_hook(state, bucket):
r"""
A ddp communication hook that uses the process_group allreduce implementation.
Args:
state (object): maintains state during the training process
bucket (GradBucket): gradient bucket
"""
cref = state.cref
tensor = bucket.buffer()
tensors = [tensor / state.process_group.size()]
key = state.get_key(bucket.get_index())
if tensor.is_sparse:
tensor = tensor.coalesce()
tensor_type = "sparse" if tensor.is_sparse else "dense"
cref.record_start("hook_future_metric", key, f"{cref.backend}_{tensor_type}_allreduce")
fut = state.process_group.allreduce(tensors).get_future()
def callback(fut):
cref.record_end("hook_future_metric", key)
return fut.wait()
return fut.then(callback)
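# Registration sketch (illustration only): each hook in this module is meant to
# be passed to DistributedDataParallel.register_comm_hook together with a state
# object that exposes `cref`, `process_group` and `get_key` (see
# hook_states.BasicHookState and ddp_models.basic_ddp_model):
#
#     ddp_model.register_comm_hook(BasicHookState(trainer, process_group),
#                                  allreduce_hook)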
def hybrid_hook(state, bucket):
r"""
A ddp communication hook that uses Gloo default process
group for sparse gradients and NCCL non-default process
group for dense gradients.
Args:
state (object): maintains state during the training process
bucket (GradBucket): gradient bucket
"""
cref = state.cref
tensor = bucket.buffer()
key = state.get_key(bucket.get_index())
if tensor.is_sparse:
cref.record_start("hook_c10d_metric", key, "gloo_sparse_allreduce")
tensor = tensor.coalesce()
tensor = tensor / state.process_group.size()
c10d.all_reduce(tensor, op=c10d.ReduceOp.SUM)
cref.record_end("hook_c10d_metric", key)
fut = torch.futures.Future()
fut.set_result([tensor])
else:
cref.record_start("hook_future_metric", key, "nccl_dense_allreduce")
tensors = [bucket.buffer() / state.process_group.size()]
fut = state.process_group.allreduce(tensors).get_future()
def callback(fut):
cref.record_end("hook_future_metric", key)
return fut.wait()
fut = fut.then(callback)
return fut
def rpc_hook(state, bucket):
r"""
A ddp communication hook that averages sparse and dense tensors using
process_bucket_with_remote_server method.
Args:
state (object): maintains state during the training process
bucket (GradBucket): gradient bucket
"""
return process_bucket_with_remote_server(state, bucket)
def sparse_rpc_hook(state, bucket):
r"""
A ddp communication hook that uses the current backend allreduce
implementation for dense tensors and a server for sparse tensors.
Args:
state (object): maintains state during the training process
bucket (GradBucket): gradient bucket
"""
tensor = bucket.buffer()
if tensor.is_sparse:
return process_bucket_with_remote_server(state, bucket)
else:
cref = state.cref
tensor = [tensor / state.process_group.size()]
key = state.get_key(bucket.get_index())
cref.record_start("hook_future_metric", key, f"{cref.backend}_dense_allreduce")
fut = state.process_group.allreduce(tensor).get_future()
def callback(fut):
cref.record_end("hook_future_metric", key)
return fut.wait()
return fut.then(callback)
| pytorch-master | benchmarks/distributed/rpc/parameter_server/trainer/hooks.py |
import torch.nn as nn
def cel(rank):
r"""A function that creates a CrossEntropyLoss
criterion for training.
Args:
rank (int): worker rank
"""
return nn.CrossEntropyLoss().cuda(rank)
| pytorch-master | benchmarks/distributed/rpc/parameter_server/trainer/criterions.py |
from .criterions import cel
from .ddp_models import basic_ddp_model
from .hook_states import BasicHookState
from .hooks import allreduce_hook, hybrid_hook, rpc_hook, sparse_rpc_hook
from .iteration_steps import basic_iteration_step
from .preprocess_data import preprocess_dummy_data
from .trainer import DdpTrainer
criterion_map = {
"cel": cel
}
ddp_hook_map = {
"allreduce_hook": allreduce_hook,
"hybrid_hook": hybrid_hook,
"rpc_hook": rpc_hook,
"sparse_rpc_hook": sparse_rpc_hook
}
ddp_model_map = {
"basic_ddp_model": basic_ddp_model
}
iteration_step_map = {
"basic_iteration_step": basic_iteration_step
}
preprocess_data_map = {
"preprocess_dummy_data": preprocess_dummy_data
}
hook_state_map = {
"BasicHookState": BasicHookState
}
trainer_map = {
"DdpTrainer": DdpTrainer
}
| pytorch-master | benchmarks/distributed/rpc/parameter_server/trainer/__init__.py |
def basic_iteration_step(self, ddp_model, criterion, optimizer, hook_state, epoch, index, batch):
r"""
A function that performs an iteration of training.
Args:
ddp_model (nn.Module): distributed data parallel model
        criterion (nn.Module): loss function used to compute the model loss
optimizer (optim.Optimizer): updates model parameters
hook_state (object): ddp communication hook state object
epoch (int): index of pass through the data
        index (int): index of the current batch within the epoch
batch (list): training examples
"""
hook_state.next_batch()
self.record_batch_start(self.epoch_key(epoch, index))
optimizer.zero_grad()
self.record_forward_start(self.epoch_key(epoch, index))
loss = criterion(ddp_model(batch[0]), batch[1])
self.record_forward_end(self.epoch_key(epoch, index))
self.record_backward_start(self.epoch_key(epoch, index))
loss.backward()
self.record_backward_end(self.epoch_key(epoch, index))
optimizer.step()
self.record_batch_end(self.epoch_key(epoch, index))
| pytorch-master | benchmarks/distributed/rpc/parameter_server/trainer/iteration_steps.py |
from torch.nn.parallel import DistributedDataParallel as DDP
def basic_ddp_model(self, rank, model, process_group, hook_state, hook):
r"""
A function that creates a ddp_model and hook_state objects.
The ddp model is initialized with a single device id and
the process group. The ddp_model also registers the communication
hook.
Args:
rank (int): worker rank
model (nn.Module): neural network model
process_group (ProcessGroup): distributed process group
HookState (class): class that will be used to keep track of state
during training.
hook (function): ddp communication hook
"""
ddp_model = DDP(
model, device_ids=[rank], process_group=process_group
)
hook_state = hook_state(self, process_group)
ddp_model.register_comm_hook(hook_state, hook)
return ddp_model, hook_state
| pytorch-master | benchmarks/distributed/rpc/parameter_server/trainer/ddp_models.py |
import functools
import time
from abc import ABC, abstractmethod
from metrics.MetricsLogger import MetricsLogger
import torch
class TrainerBase(ABC):
BATCH_LEVEL_METRIC = "batch_level_metric"
BATCH_ALL = "batch_all"
FORWARD_METRIC = "forward_metric"
FORWARD_PASS = "forward_pass"
BACKWARD_METRIC = "backward_metric"
BACKWARD = "backward"
def __init__(self, rank):
r"""
Inits TrainerBase class.
Args:
rank (int): worker rank
"""
self.__metrics_logger = MetricsLogger(rank)
@abstractmethod
def train(self):
r"""
A method to be implemented by child class that will train a neural network.
"""
return
def record_start(self, type, key, name, cuda=True):
r"""
A method that records the start event for a metric.
Args:
type (str): group id for metric
key (str): unique id for metric within a group
name (str): description of the metric
cuda (bool): indicator to determine if this is a CUDA metric
"""
self.__metrics_logger.record_start(
type,
key,
name,
cuda
)
def record_end(self, type, key):
r"""
A method that records the end event for a metric.
Args:
type (str): group id for metric
key (str): unique id for metric within a group
"""
self.__metrics_logger.record_end(
type,
key
)
def record_batch_start(self, key, cuda=True):
r"""
A helper method that records a batch metric for the
given key. A user should call this at the start of an
iteration step during training.
Args:
key (str): unique id for metric within a group
cuda (bool): indicator to determine if this is a CUDA metric
"""
self.__metrics_logger.record_start(
self.BATCH_LEVEL_METRIC,
key,
self.BATCH_ALL,
cuda
)
def record_batch_end(self, key):
r"""
A helper method that records a batch metric for the
given key. A user should call this at the end of an
iteration step during training.
Args:
key (str): unique id for metric within a group
"""
self.__metrics_logger.record_end(
self.BATCH_LEVEL_METRIC,
key
)
def record_forward_start(self, key, cuda=True):
r"""
A helper method that records a forward metric
for the given key. A user should call this before
their neural network forward.
Args:
key (str): unique id for metric within a group
cuda (bool): indicator to determine if this is a CUDA metric
"""
self.__metrics_logger.record_start(
self.FORWARD_METRIC,
key,
self.FORWARD_PASS,
cuda
)
def record_forward_end(self, key):
r"""
A helper method that records a forward metric
for the given key. A user should call this after their
neural network forward.
Args:
key (str): unique id for metric within a group
"""
self.__metrics_logger.record_end(
self.FORWARD_METRIC,
key
)
def record_backward_start(self, key, cuda=True):
r"""
A helper method that records a backward metric
for the given key. A user should call this before
their .backward() call.
Args:
key (str): unique id for metric within a group
cuda (bool): indicator to determine if this is a CUDA metric
"""
self.__metrics_logger.record_start(
self.BACKWARD_METRIC,
key,
self.BACKWARD,
cuda
)
def record_backward_end(self, key):
r"""
A helper method that records a backward metric
for the given key. A user should call this after
.backward().
Args:
key (str): unique id for metric within a group
"""
self.__metrics_logger.record_end(
self.BACKWARD_METRIC,
key
)
@staticmethod
def methodmetric(name, type="method_metric", cuda=True):
r"""
A decorator that records a metric for the decorated method.
Args:
name (str): description of the metric
type (str): group id for metric
cuda (bool): indicator to determine if this is a CUDA metric
"""
def decorator(function):
@functools.wraps(function)
def wrapper(self, *args):
key = time.time()
self.__metrics_logger.record_start(type, key, name, cuda)
result = function(self, *args)
self.__metrics_logger.record_end(type, key)
return result
return wrapper
return decorator
def get_metrics(self):
r"""
A method that returns metrics captured by the __metrics_logger.
"""
return self.__metrics_logger.get_processed_metrics()
def clear_metrics(self):
r"""
A method that clears __metrics_logger recorded metrics.
"""
return self.__metrics_logger.clear_metrics()
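# --- Illustrative sketch (not part of the original file) -----------------------
# A minimal, hypothetical TrainerBase subclass showing how the record_* helpers
# and the methodmetric decorator above are meant to be used. Constructing it
# still requires a working MetricsLogger, so only the call pattern is shown.
class _ExampleTrainer(TrainerBase):
    def train(self, model, data):
        for index, batch in enumerate(data):
            key = f"0,{index}"
            self.record_batch_start(key, cuda=False)
            model(batch)
            self.record_batch_end(key)
    @TrainerBase.methodmetric(name="example_eval", cuda=False)
    def evaluate(self, batch):
        # the decorator records start/end events around this call
        return batch.sum()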
class DdpTrainer(TrainerBase):
def __init__(
self,
process_group,
use_cuda_rpc,
server_rref,
backend,
epochs,
preprocess_data,
create_criterion,
create_ddp_model,
hook_state_class,
hook,
iteration_step
):
r"""
A trainer that implements a DDP training algorithm using a simple hook that performs allreduce
using the process_group implementation.
Args:
process_group (ProcessGroup): distributed process group
use_cuda_rpc (bool): indicator for CUDA RPC
server_rref (RRef): remote reference to the server
backend (str): distributed communication backend
epochs (int): epoch count for training
preprocess_data (function): preprocesses data passed
to the trainer before starting training
create_criterion (function): creates a criterion to calculate loss
create_ddp_model (function): creates a ddp model for the trainer
            hook_state_class (class): class that will be used to keep track of state
during training.
hook (function): ddp communication hook
iteration_step (function): will perform 1 step of training
"""
super().__init__(process_group.rank())
self.process_group = process_group
self.use_cuda_rpc = use_cuda_rpc
self.server_rref = server_rref
self.backend = backend
self.epochs = epochs
self.preprocess_data = preprocess_data
self.create_criterion = create_criterion
self.create_ddp_model = create_ddp_model
self.hook_state_class = hook_state_class
self.hook = hook
self.iteration_step = iteration_step
self.rank = process_group.rank()
self.trainer_count = process_group.size()
def epoch_key(self, epoch, index):
r"""
A method that returns an encoded key that represents the current epoch and
iteration index.
Args:
epoch (int): epoch index
index (int): iteration index
"""
return f"{epoch},{index}"
def train(self, model, data):
r"""
A method that implements the training algorithm.
Args:
model (nn.Module): neural network model
data (list): training examples
"""
model = model.cuda(self.rank)
data = self.preprocess_data(self.rank, data)
criterion = self.create_criterion(self.rank)
ddp_model, hook_state = self.create_ddp_model(
self, self.rank, model, self.process_group, self.hook_state_class, self.hook
)
optimizer = torch.optim.SGD(ddp_model.parameters(), 1e-4)
for epoch in range(self.epochs):
if epoch % 5 == 0 and self.rank == 0:
print(f"train epoch={epoch}")
for index, batch in enumerate(data):
self.iteration_step(
self, ddp_model, criterion, optimizer, hook_state, epoch, index, batch
)
torch.cuda.synchronize(self.rank)
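# --- Illustrative sketch (not part of the original file) -----------------------
# A hypothetical iteration_step function matching the call made in train() above.
# The real step functions live in a separate module, and the (inputs, targets)
# batch layout assumed here may differ from the benchmark's data loader.
def example_iteration_step(self, ddp_model, criterion, optimizer, hook_state, epoch, index, batch):
    hook_state.next_batch()
    key = self.epoch_key(epoch, index)
    self.record_batch_start(key)
    optimizer.zero_grad()
    inputs, targets = batch[0].cuda(self.rank), batch[1].cuda(self.rank)
    self.record_forward_start(key)
    out = ddp_model(inputs)
    self.record_forward_end(key)
    loss = criterion(out, targets)
    self.record_backward_start(key)
    loss.backward()
    self.record_backward_end(key)
    optimizer.step()
    self.record_batch_end(key)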
| pytorch-master | benchmarks/distributed/rpc/parameter_server/trainer/trainer.py |
class BasicHookState:
def __init__(self, cref, process_group):
r"""
A class that holds state information that is needed by the communication hook
during the training algorithm.
Args:
            cref (DdpTrainer): reference to the trainer instance (the trainer's self)
process_group (ProcessGroup): distributed process group
"""
self.cref = cref
self.process_group = process_group
self.batch_number = -1
def get_key(self, bucket_index):
r"""
A method that returns an encoded key that represents the current batch and
bucket index.
Args:
bucket_index (int): index of the bucket being processed in backward
"""
return f"{self.batch_number},{bucket_index}"
def next_batch(self):
r"""
A method that increments batch_number by 1.
"""
self.batch_number += 1
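# --- Illustrative sketch (not part of the original file) -----------------------
# A hypothetical communication hook showing how BasicHookState is consumed:
# get_key() tags per-bucket timing on the trainer (cref) while the gradients are
# allreduced through state.process_group. The hook name and the metric group
# name are assumptions, not the benchmark's real hooks.
import torch.distributed as dist
def example_metric_hook(state, bucket):
    key = state.get_key(bucket.index())
    state.cref.record_start("hook_metric", key, "allreduce")
    tensor = bucket.buffer().div_(state.process_group.size())
    fut = dist.all_reduce(tensor, group=state.process_group, async_op=True).get_future()
    def finish(f):
        state.cref.record_end("hook_metric", key)
        return f.value()[0]
    return fut.then(finish)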
| pytorch-master | benchmarks/distributed/rpc/parameter_server/trainer/hook_states.py |
import random
import time
import torch
import torch.distributed.rpc as rpc
from torch.distributed.rpc import rpc_sync
from agent import AgentBase
class ObserverBase:
def __init__(self):
r"""
Inits observer class
"""
self.id = rpc.get_worker_info().id
def set_state(self, state_size, batch):
r"""
Further initializes observer to be aware of rpc environment
Args:
state_size (list): List of integers denoting dimensions of state
batch (bool): Whether agent will be using batch select action
"""
self.state_size = state_size
self.select_action = AgentBase.select_action_batch if batch else AgentBase.select_action_non_batch
def reset(self):
r"""
Resets state randomly
"""
state = torch.rand(self.state_size)
return state
def step(self, action):
r"""
Generates random state and reward
Args:
action (int): Int received from agent representing action to take on state
"""
state = torch.rand(self.state_size)
reward = random.randint(0, 1)
return state, reward
def run_ob_episode(self, agent_rref, n_steps):
r"""
Runs single observer episode where for n_steps, an action is selected
        from the agent based on current state and state is updated
Args:
agent_rref (RRef): Remote Reference to the agent
n_steps (int): Number of times to select an action to transform state per episode
"""
state, ep_reward = self.reset(), None
rewards = torch.zeros(n_steps)
observer_latencies = []
observer_throughput = []
for st in range(n_steps):
ob_latency_start = time.time()
action = rpc_sync(agent_rref.owner(), self.select_action, args=(
agent_rref, self.id, state))
ob_latency = time.time() - ob_latency_start
observer_latencies.append(ob_latency)
observer_throughput.append(1 / ob_latency)
state, reward = self.step(action)
rewards[st] = reward
return [rewards, ep_reward, observer_latencies, observer_throughput]
| pytorch-master | benchmarks/distributed/rpc/rl/observer.py |
import numpy as np
import time
import torch
import torch.distributed.rpc as rpc
from agent import AgentBase
from observer import ObserverBase
COORDINATOR_NAME = "coordinator"
AGENT_NAME = "agent"
OBSERVER_NAME = "observer{}"
EPISODE_STEPS = 100
class CoordinatorBase:
def __init__(self, batch_size, batch, state_size, nlayers, out_features):
r"""
Coordinator object to run on worker. Only one coordinator exists. Responsible
for facilitating communication between agent and observers and recording benchmark
throughput and latency data.
Args:
batch_size (int): Number of observer requests to process in a batch
batch (bool): Whether to process and respond to observer requests as a batch or 1 at a time
state_size (list): List of ints dictating the dimensions of the state
nlayers (int): Number of layers in the model
out_features (int): Number of out features in the model
"""
self.batch_size = batch_size
self.batch = batch
self.agent_rref = None # Agent RRef
self.ob_rrefs = [] # Observer RRef
agent_info = rpc.get_worker_info(AGENT_NAME)
self.agent_rref = rpc.remote(agent_info, AgentBase)
for rank in range(batch_size):
ob_info = rpc.get_worker_info(OBSERVER_NAME.format(rank + 2))
ob_ref = rpc.remote(ob_info, ObserverBase)
self.ob_rrefs.append(ob_ref)
ob_ref.rpc_sync().set_state(state_size, batch)
self.agent_rref.rpc_sync().set_world(
batch_size, state_size, nlayers, out_features, self.batch)
def run_coordinator(self, episodes, episode_steps, queue):
r"""
Runs n benchmark episodes. Each episode is started by coordinator telling each
observer to contact the agent. Each episode is concluded by coordinator telling agent
to finish the episode, and then the coordinator records benchmark data
Args:
episodes (int): Number of episodes to run
            episode_steps (int): Number of steps to be run in each episode by each observer
queue (SimpleQueue): SimpleQueue from torch.multiprocessing.get_context() for
saving benchmark run results to
"""
agent_latency_final = []
agent_throughput_final = []
observer_latency_final = []
observer_throughput_final = []
for ep in range(episodes):
ep_start_time = time.time()
print(f"Episode {ep} - ", end='')
n_steps = episode_steps
agent_start_time = time.time()
futs = []
for ob_rref in self.ob_rrefs:
futs.append(ob_rref.rpc_async().run_ob_episode(
self.agent_rref, n_steps))
rets = torch.futures.wait_all(futs)
agent_latency, agent_throughput = self.agent_rref.rpc_sync().finish_episode(rets)
self.agent_rref.rpc_sync().reset_metrics()
agent_latency_final += agent_latency
agent_throughput_final += agent_throughput
observer_latency_final += [ret[2] for ret in rets]
observer_throughput_final += [ret[3] for ret in rets]
ep_end_time = time.time()
episode_time = ep_end_time - ep_start_time
print(round(episode_time, 3))
observer_latency_final = [t for s in observer_latency_final for t in s]
observer_throughput_final = [
t for s in observer_throughput_final for t in s]
benchmark_metrics = {'agent latency (seconds)': {},
'agent throughput': {},
'observer latency (seconds)': {},
'observer throughput': {}}
print("For batch size {0}".format(self.batch_size))
print("\nAgent Latency - ", len(agent_latency_final))
agent_latency_final = sorted(agent_latency_final)
for p in [50, 75, 90, 95]:
v = np.percentile(agent_latency_final, p)
print("p" + str(p) + ":", round(v, 3))
p = f'p{p}'
benchmark_metrics['agent latency (seconds)'][p] = round(v, 3)
print("\nAgent Throughput - ", len(agent_throughput_final))
agent_throughput_final = sorted(agent_throughput_final)
for p in [50, 75, 90, 95]:
v = np.percentile(agent_throughput_final, p)
print("p" + str(p) + ":", int(v))
p = f'p{p}'
benchmark_metrics['agent throughput'][p] = int(v)
print("\nObserver Latency - ", len(observer_latency_final))
observer_latency_final = sorted(observer_latency_final)
for p in [50, 75, 90, 95]:
v = np.percentile(observer_latency_final, p)
print("p" + str(p) + ":", round(v, 3))
p = f'p{p}'
benchmark_metrics['observer latency (seconds)'][p] = round(v, 3)
print("\nObserver Throughput - ", len(observer_throughput_final))
observer_throughput_final = sorted(observer_throughput_final)
for p in [50, 75, 90, 95]:
v = np.percentile(observer_throughput_final, p)
print("p" + str(p) + ":", int(v))
p = f'p{p}'
benchmark_metrics['observer throughput'][p] = int(v)
if queue:
queue.put(benchmark_metrics)
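# --- Illustrative sketch (not part of the original file) -----------------------
# The reporting pattern used above, reduced to a standalone snippet over made-up
# latency samples: sort the per-call latencies once, then read off the same
# p50/p75/p90/p95 points that end up in benchmark_metrics.
_example_latencies = sorted([0.012, 0.015, 0.011, 0.020, 0.014, 0.013])
_example_summary = {
    f"p{p}": round(float(np.percentile(_example_latencies, p)), 3) for p in [50, 75, 90, 95]
}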
| pytorch-master | benchmarks/distributed/rpc/rl/coordinator.py |
from functools import reduce
import time
import threading
import torch
from torch.distributions import Categorical
import torch.distributed.rpc as rpc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
OBSERVER_NAME = "observer{}"
class Policy(nn.Module):
def __init__(self, in_features, nlayers, out_features):
r"""
Inits policy class
Args:
in_features (int): Number of input features the model takes
nlayers (int): Number of layers in the model
out_features (int): Number of features the model outputs
"""
super(Policy, self).__init__()
self.model = nn.Sequential(
nn.Flatten(1, -1),
nn.Linear(in_features, out_features),
* [nn.Linear(out_features, out_features) for _ in range(nlayers)]
)
self.dim = 0
def forward(self, x):
action_scores = self.model(x)
return F.softmax(action_scores, dim=self.dim)
class AgentBase:
def __init__(self):
r"""
Inits agent class
"""
self.id = rpc.get_worker_info().id
self.running_reward = 0
self.eps = 1e-7
self.rewards = {}
self.future_actions = torch.futures.Future()
self.lock = threading.Lock()
self.agent_latency_start = None
self.agent_latency_end = None
self.agent_latency = []
self.agent_throughput = []
def reset_metrics(self):
r"""
Sets all benchmark metrics to their empty values
"""
self.agent_latency_start = None
self.agent_latency_end = None
self.agent_latency = []
self.agent_throughput = []
def set_world(self, batch_size, state_size, nlayers, out_features, batch=True):
r"""
Further initializes agent to be aware of rpc environment
Args:
batch_size (int): size of batches of observer requests to process
state_size (list): List of ints dictating the dimensions of the state
nlayers (int): Number of layers in the model
out_features (int): Number of out features in the model
batch (bool): Whether to process and respond to observer requests as a batch or 1 at a time
"""
self.batch = batch
self.policy = Policy(reduce((lambda x, y: x * y), state_size), nlayers, out_features)
self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
self.batch_size = batch_size
for rank in range(batch_size):
ob_info = rpc.get_worker_info(OBSERVER_NAME.format(rank + 2))
self.rewards[ob_info.id] = []
self.saved_log_probs = [] if self.batch else {
k: [] for k in range(self.batch_size)}
self.pending_states = self.batch_size
self.state_size = state_size
self.states = torch.zeros(self.batch_size, *state_size)
@staticmethod
@rpc.functions.async_execution
def select_action_batch(agent_rref, observer_id, state):
r"""
Receives state from an observer to select action for. Queues the observers's request
        for an action until the queue size equals the batch size specified during agent initialization, at which point
actions are selected for all pending observer requests and communicated back to observers
Args:
            agent_rref (RRef): RRef of this agent
observer_id (int): Observer id of observer calling this function
state (Tensor): Tensor representing current state held by observer
"""
self = agent_rref.local_value()
observer_id -= 2
self.states[observer_id].copy_(state)
future_action = self.future_actions.then(
lambda future_actions: future_actions.wait()[observer_id].item()
)
with self.lock:
if self.pending_states == self.batch_size:
self.agent_latency_start = time.time()
self.pending_states -= 1
if self.pending_states == 0:
self.pending_states = self.batch_size
probs = self.policy(self.states)
m = Categorical(probs)
actions = m.sample()
self.saved_log_probs.append(m.log_prob(actions).t())
future_actions = self.future_actions
self.future_actions = torch.futures.Future()
future_actions.set_result(actions)
self.agent_latency_end = time.time()
batch_latency = self.agent_latency_end - self.agent_latency_start
self.agent_latency.append(batch_latency)
self.agent_throughput.append(self.batch_size / batch_latency)
return future_action
@staticmethod
def select_action_non_batch(agent_rref, observer_id, state):
r"""
Select actions based on observer state and communicates back to observer
Args:
agent_rref (RRef): RRef of this agent
observer_id (int): Observer id of observer calling this function
state (Tensor): Tensor representing current state held by observer
"""
self = agent_rref.local_value()
observer_id -= 2
agent_latency_start = time.time()
state = state.float().unsqueeze(0)
probs = self.policy(state)
m = Categorical(probs)
action = m.sample()
self.saved_log_probs[observer_id].append(m.log_prob(action))
agent_latency_end = time.time()
non_batch_latency = agent_latency_end - agent_latency_start
self.agent_latency.append(non_batch_latency)
self.agent_throughput.append(1 / non_batch_latency)
return action.item()
def finish_episode(self, rets):
r"""
Finishes the episode
Args:
            rets (list): List containing rewards generated by select_action calls during
episode run
"""
return self.agent_latency, self.agent_throughput
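# --- Illustrative sketch (not part of the original file) -----------------------
# The batching pattern used by select_action_batch, reduced to a standalone
# snippet: every caller chains .then() onto one shared Future, and the whole
# batch is answered by a single set_result call. Sizes and values are made up.
_shared_future = torch.futures.Future()
_per_caller = [_shared_future.then(lambda fut, i=i: fut.wait()[i].item()) for i in range(3)]
_shared_future.set_result(torch.tensor([7, 8, 9]))
# each _per_caller[i].wait() now returns 7, 8 and 9 respectively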
| pytorch-master | benchmarks/distributed/rpc/rl/agent.py |
import argparse
import os
import time
import json
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from coordinator import CoordinatorBase
COORDINATOR_NAME = "coordinator"
AGENT_NAME = "agent"
OBSERVER_NAME = "observer{}"
TOTAL_EPISODES = 10
TOTAL_EPISODE_STEPS = 100
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description='PyTorch RPC RL Benchmark')
parser.add_argument('--world_size', type=str, default='10')
parser.add_argument('--master_addr', type=str, default='127.0.0.1')
parser.add_argument('--master_port', type=str, default='29501')
parser.add_argument('--batch', type=str, default='True')
parser.add_argument('--state_size', type=str, default='10-20-10')
parser.add_argument('--nlayers', type=str, default='5')
parser.add_argument('--out_features', type=str, default='10')
parser.add_argument('--output_file_path', type=str, default='benchmark_report.json')
args = parser.parse_args()
args = vars(args)
def run_worker(rank, world_size, master_addr, master_port, batch, state_size, nlayers, out_features, queue):
r"""
inits an rpc worker
Args:
rank (int): Rpc rank of worker machine
world_size (int): Number of workers in rpc network (number of observers +
1 agent + 1 coordinator)
        master_addr (str): Master address of coordinator
master_port (str): Master port of coordinator
batch (bool): Whether agent will use batching or process one observer
            request at a time
state_size (str): Numerical str representing state dimensions (ie: 5-15-10)
nlayers (int): Number of layers in model
out_features (int): Number of out features in model
queue (SimpleQueue): SimpleQueue from torch.multiprocessing.get_context() for
saving benchmark run results to
"""
state_size = list(map(int, state_size.split('-')))
batch_size = world_size - 2 # No. of observers
os.environ['MASTER_ADDR'] = master_addr
os.environ['MASTER_PORT'] = master_port
if rank == 0:
rpc.init_rpc(COORDINATOR_NAME, rank=rank, world_size=world_size)
coordinator = CoordinatorBase(
batch_size, batch, state_size, nlayers, out_features)
coordinator.run_coordinator(TOTAL_EPISODES, TOTAL_EPISODE_STEPS, queue)
elif rank == 1:
rpc.init_rpc(AGENT_NAME, rank=rank, world_size=world_size)
else:
rpc.init_rpc(OBSERVER_NAME.format(rank),
rank=rank, world_size=world_size)
rpc.shutdown()
def find_graph_variable(args):
r"""
Determines if user specified multiple entries for a single argument, in which case
benchmark is run for each of these entries. Comma separated values in a given argument indicate multiple entries.
Output is presented so that user can use plot repo to plot the results with each of the
variable argument's entries on the x-axis. Args is modified in accordance with this.
More than 1 argument with multiple entries is not permitted.
Args:
args (dict): Dictionary containing arguments passed by the user (and default arguments)
"""
var_types = {'world_size': int,
'state_size': str,
'nlayers': int,
'out_features': int,
'batch': str2bool}
for arg in var_types.keys():
if ',' in args[arg]:
if args.get('x_axis_name'):
raise("Only 1 x axis graph variable allowed")
args[arg] = list(map(var_types[arg], args[arg].split(','))) # convert , separated str to list
args['x_axis_name'] = arg
else:
args[arg] = var_types[arg](args[arg]) # convert string to proper type
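# --- Illustrative sketch (not part of the original file) -----------------------
# A concrete, hypothetical expansion: only 'nlayers' holds comma-separated
# values, so it becomes the x-axis variable and is converted to a list of ints,
# while the remaining entries are cast to the scalar types listed in var_types.
_example_args = {'world_size': '10', 'state_size': '10-20-10', 'nlayers': '2,3,4',
                 'out_features': '10', 'batch': 'True'}
find_graph_variable(_example_args)
assert _example_args['nlayers'] == [2, 3, 4]
assert _example_args['x_axis_name'] == 'nlayers' and _example_args['world_size'] == 10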
def append_spaces(string, length):
r"""
Returns a modified string with spaces appended to the end. If length of string argument
is greater than or equal to length, a single space is appended, otherwise x spaces are appended
where x is the difference between the length of string and the length argument
Args:
string (str): String to be modified
length (int): Size of desired return string with spaces appended
Return: (str)
"""
string = str(string)
offset = length - len(string)
if offset <= 0:
offset = 1
string += ' ' * offset
return string
def print_benchmark_results(report):
r"""
Prints benchmark results
Args:
report (dict): JSON formatted dictionary containing relevant data on the run of this application
"""
print("--------------------------------------------------------------")
print("PyTorch distributed rpc benchmark reinforcement learning suite")
print("--------------------------------------------------------------")
for key, val in report.items():
if key != "benchmark_results":
print(f'{key} : {val}')
x_axis_name = report.get('x_axis_name')
col_width = 7
heading = ""
if x_axis_name:
x_axis_output_label = f'{x_axis_name} |'
heading += append_spaces(x_axis_output_label, col_width)
metric_headers = ['agent latency (seconds)', 'agent throughput',
'observer latency (seconds)', 'observer throughput']
percentile_subheaders = ['p50', 'p75', 'p90', 'p95']
subheading = ""
if x_axis_name:
subheading += append_spaces(' ' * (len(x_axis_output_label) - 1), col_width)
for header in metric_headers:
heading += append_spaces(header, col_width * len(percentile_subheaders))
for percentile in percentile_subheaders:
subheading += append_spaces(percentile, col_width)
print(heading)
print(subheading)
for benchmark_run in report['benchmark_results']:
run_results = ""
if x_axis_name:
run_results += append_spaces(benchmark_run[x_axis_name], max(col_width, len(x_axis_output_label)))
for metric_name in metric_headers:
percentile_results = benchmark_run[metric_name]
for percentile in percentile_subheaders:
run_results += append_spaces(percentile_results[percentile], col_width)
print(run_results)
def main():
r"""
Runs rpc benchmark once if no argument has multiple entries, and otherwise once for each of the multiple entries.
Multiple entries is indicated by comma separated values, and may only be done for a single argument.
Results are printed as well as saved to output file. In case of multiple entries for a single argument,
    the plot repo can be used to plot the results on the y-axis with each entry on the x-axis.
"""
find_graph_variable(args)
# run once if no x axis variables
x_axis_variables = args[args['x_axis_name']] if args.get('x_axis_name') else [None]
ctx = mp.get_context('spawn')
queue = ctx.SimpleQueue()
benchmark_runs = []
for i, x_axis_variable in enumerate(x_axis_variables): # run benchmark for every x axis variable
if len(x_axis_variables) > 1:
args[args['x_axis_name']] = x_axis_variable # set x axis variable for this benchmark iteration
processes = []
start_time = time.time()
for rank in range(args['world_size']):
prc = ctx.Process(
target=run_worker,
args=(
rank, args['world_size'], args['master_addr'], args['master_port'],
args['batch'], args['state_size'], args['nlayers'],
args['out_features'], queue
)
)
prc.start()
processes.append(prc)
benchmark_run_results = queue.get()
for process in processes:
process.join()
print(f"Time taken benchmark run {i} -, {time.time() - start_time}")
if args.get('x_axis_name'):
# save x axis value was for this iteration in the results
benchmark_run_results[args['x_axis_name']] = x_axis_variable
benchmark_runs.append(benchmark_run_results)
report = args
report['benchmark_results'] = benchmark_runs
if args.get('x_axis_name'):
# x_axis_name was variable so dont save a constant in the report for that variable
del report[args['x_axis_name']]
with open(args['output_file_path'], 'w') as f:
json.dump(report, f)
print_benchmark_results(report)
if __name__ == '__main__':
main()
| pytorch-master | benchmarks/distributed/rpc/rl/launcher.py |
from . import benchmark
import numpy as np
class MatMulBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, B, M, N, K):
super().__init__(mode, device, dtype)
self.B = B
self.M = M
self.N = N
self.K = K
self.d1 = self.rand([B, M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.d2 = self.rand([B, N, K], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.d1, self.d2]
def forward(self, d1, d2):
y = self.matmul(d1, d2)
return y
def reference(self):
return np.matmul(self.numpy(self.d1), self.numpy(self.d2))
def config(self):
return [self.B, self.M, self.N, self.K]
@staticmethod
def module():
return "batch_matmul"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1
algorithmic_count = 1
else:
sol_count = 1 + 1
algorithmic_count = 1 + (1 + 1)
        buffer_size = (
            self.B * self.M * self.N
            + self.B * self.N * self.K
            + self.B * self.M * self.K
        )
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
def compute_workload(self):
if self.mode == "fwd":
count = 1
else:
count = 1 + (1 + 1)
op_count = 2 * self.B * self.M * self.N * self.K
return op_count * count
@staticmethod
def default_configs():
return [[128, 64, 128, 256]]
benchmark.register_benchmark_class(MatMulBench)
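# --- Illustrative sketch (not part of the original file) -----------------------
# The compute model above for the default config [B, M, N, K] = [128, 64, 128, 256]:
# each of the B*M*K outputs needs N multiply-adds, so the op count is
# 2 * B * M * N * K; dividing by a measured iteration time gives the Gops/s value
# that run() reports. The iteration time below is a made-up number.
_B, _M, _N, _K = 128, 64, 128, 256
_op_count = 2 * _B * _M * _N * _K
_example_iter_time = 1e-3  # seconds, hypothetical
_example_gops = _op_count / _example_iter_time / 1e9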
| pytorch-master | benchmarks/tensorexpr/matmul.py |
# This is a copy of rnn_attention from MLPerf, with some common sizes hardcoded
# for benchmarking and some control flow stripped out.
# https://github.com/mlperf/training/blob/master/rnn_translator/pytorch/seq2seq/models/attention.py
from . import benchmark
import torch
class BahdanauAttention(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, t_q, t_k, n):
super().__init__(mode, device, dtype)
self.b = b
self.t_q = t_q
self.t_k = t_k
self.n = n
self.att_query = self.rand(
[b, t_q, n], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.att_keys = self.rand(
[b, t_k, n], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.normalize_bias = self.rand(
[n], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.linear_att = self.rand(
[n], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.inputs = [
self.att_query,
self.att_keys,
self.normalize_bias,
self.linear_att,
]
def forward(self, att_query, att_keys, normalize_bias, linear_att):
"""
Calculate Bahdanau score
:param att_query: b x t_q x n
:param att_keys: b x t_k x n
return b x t_q x t_k scores
"""
b, t_k, n = att_keys.size()
t_q = att_query.size(1)
att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n)
att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n)
sum_qk = att_query + att_keys + normalize_bias
out = torch.tanh(sum_qk).matmul(linear_att)
return out
def reference(self):
return self.numpy(self.forward(*self.inputs))
def config(self):
return [self.b, self.t_q, self.t_k, self.n]
@staticmethod
def module():
return "attention"
def memory_workload(self):
def memsize(t):
return t.numel() * t.element_size()
input_size = (
memsize(self.att_query)
+ memsize(self.att_keys)
+ memsize(self.normalize_bias)
+ memsize(self.linear_att)
)
output_size = 4 * torch.Size([self.b, self.t_q, self.t_k]).numel()
io_size = input_size + output_size
# If matmul is not fused, must write and then read `sum_qk`.
intermediate_size = (
2 * 4 * torch.Size([self.b, self.t_q, self.t_k, self.n]).numel()
)
return {"sol": io_size, "algorithmic": io_size + intermediate_size}
@staticmethod
def default_configs():
mlperf_inference = [1280, 1, 66, 1024]
nvidia = [128, 10, 128, 1024]
return [mlperf_inference, nvidia]
benchmark.register_benchmark_class(BahdanauAttention)
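# --- Illustrative sketch (not part of the original file) -----------------------
# Shape walk-through of forward() with tiny, made-up sizes: the query/key tensors
# are expanded to a common b x t_q x t_k x n layout, summed with the bias, and the
# final matmul against linear_att (size n) collapses the last dimension, leaving
# b x t_q x t_k scores.
_b, _t_q, _t_k, _n = 2, 3, 5, 4
_q = torch.rand(_b, _t_q, _n).unsqueeze(2).expand(_b, _t_q, _t_k, _n)
_k = torch.rand(_b, _t_k, _n).unsqueeze(1).expand(_b, _t_q, _t_k, _n)
_scores = torch.tanh(_q + _k + torch.rand(_n)).matmul(torch.rand(_n))
assert _scores.shape == (_b, _t_q, _t_k)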
| pytorch-master | benchmarks/tensorexpr/attention.py |
import contextlib
import numpy as np
import os
import time
from . import tensor_engine
import torch
import json
class Benchmark(object):
def __init__(self, mode, device, dtype):
self.mode = mode
self.deterministic = False
self.device = device
self.dtype = dtype
self.output_type = "stdout"
self.print_ir = False
self.print_kernel = False
if mode == "both":
self.requires_grad = True
elif mode == "fwd":
self.requires_grad = False
else:
raise ValueError("invalid mode: %s" % (mode))
self.result_grad = None
self.grad_variables = []
self.engine = tensor_engine.get_engine()
self.engine.reset(device)
# forward all member functions in self.engine to self
for method in dir(self.engine):
if not callable(getattr(self.engine, method)):
continue
            # don't forward if this function is overridden here
if hasattr(self, method):
continue
            # don't forward if it is an internal function
if method.startswith("_"):
continue
method_engine = getattr(self.engine, method)
setattr(self, method, method_engine)
def forward(self):
"""do one step worth of computation
"""
raise ValueError("this method should be reimplemented by subclass")
def check(self):
if not self.deterministic:
return
np.testing.assert_allclose(
self.reference(), self.numpy(self.compute()), atol=1e-2
)
def config(self):
"""returns an array for the current benchmark configs
"""
raise ValueError("this method should be reimplemented by subclass")
def desc(self):
"""return the description of the current benchmark
"""
config = self.config()
config_str = "_".join([str(x) for x in config])
device = self.device
if "NNC_NUM_THREADS" in os.environ:
num_threads_str = os.environ["NNC_NUM_THREADS"]
device += num_threads_str
return "%s: %s_%s_%s_%s" % (
self.engine.mode,
self.module(),
self.mode,
device,
config_str,
)
@staticmethod
def module():
raise ValueError("this method should be reimplemented by subclass")
def memory_workload(self):
raise ValueError("this method should be reimplemented by subclass")
def compute_workload(self):
"""return the number of scalar operations it takes to finish the tensor op"""
return None
@staticmethod
def input_iterable():
"""A benchmark child class should return true if it utilizes the input iter arg"""
return False
def dtype_to_bytes(self) :
return torch.tensor(0, dtype=self.dtype).element_size()
@staticmethod
def default_configs():
"""return a list of defualt configs for this benchmark"""
raise ValueError("this method should be reimplemented by subclass")
def is_supported(self):
return True
def rand(self, shape, device=None, dtype=None, requires_grad=False):
v = self.engine.rand(shape, device=device, dtype=dtype, requires_grad=requires_grad)
if requires_grad:
self.grad_variables.append(v)
return v
def nchw_rand(self, shape, device=None, requires_grad=False):
v = self.engine.nchw_rand(shape, device=device, requires_grad=requires_grad)
if requires_grad:
self.grad_variables.append(v)
return v
def compute(self):
if self.bm_jit:
return self.bm_jit(*self.inputs)
else:
return self.forward(*self.inputs)
def run(self, args):
self.print_ir = args.print_ir
if args.cuda_fuser == "old" :
torch._C._jit_override_can_fuse_on_gpu(True)
if args.print_kernel :
os.environ['PYTORCH_FUSION_DEBUG'] = '1'
return self.run_impl(True)
elif args.cuda_fuser == "te" :
torch._C._jit_set_texpr_fuser_enabled(True)
with cuda_pointwise_context(
args.cuda_pointwise_loop_levels,
args.cuda_pointwise_block_count,
args.cuda_pointwise_block_size,
):
return self.run_impl(True)
elif args.cuda_fuser == "nvf" :
torch._C._jit_set_nvfuser_enabled(True)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_bailout_depth(20)
if args.print_kernel :
os.environ['PYTORCH_CUDA_FUSER_DEBUG'] = '1'
return self.run_impl(True)
else :
return self.run_impl(False)
def run_impl(self, use_fuser):
warmups = 10
if self.device == "cuda":
iters = 1000
else:
iters = 10
engine = tensor_engine.get_engine()
self.bm_jit = None
for i in range(warmups + iters):
if i == warmups:
if self.device == "cuda":
engine.sync_cuda()
time_start = time.time()
if i == 0:
if self.jit_mode == "trace" and use_fuser :
self.bm_jit = torch.jit.trace(
self.forward, example_inputs=self.inputs, check_trace=False
)
if callable(getattr(self, "reference", None)):
self.check()
else:
print("Warning: no reference result for ", self.module())
elif i == 1:
# The fusion graph is visible after the first iter is executed
if self.jit_mode == "trace" and use_fuser and self.print_ir :
print(self.bm_jit.graph_for(*self.inputs))
z = self.compute()
if self.mode == "both":
if self.result_grad is None:
self.result_grad = engine.rand_like(z)
engine.backward([z], [self.result_grad], self.grad_variables)
if self.device == "cuda":
engine.sync_cuda()
duration = time.time() - time_start
iter_time = duration / iters
memory_workload = self.memory_workload()
compute_workload = self.compute_workload()
result_dict = {
"desc": self.desc(),
"us": iter_time * 1e6,
"sol": memory_workload["sol"] * self.dtype_to_bytes() / iter_time / 1e9,
"algorithmic": memory_workload["algorithmic"] * self.dtype_to_bytes() / iter_time / 1e9,
}
if compute_workload:
result_dict["compute_workload"] = compute_workload / iter_time / 1e9
self.dump_result(result_dict)
def dump_result(self, result_dict):
if self.output_type == "json":
print(json.dumps(result_dict))
elif self.output_type == "stdout":
msg = "%s: %.2f us, SOL %.2f GB/s, algorithmic %.2f GB/s" % (
result_dict["desc"],
result_dict["us"],
result_dict["sol"],
result_dict["algorithmic"],
)
if "compute_workload" in result_dict:
msg += ", compute %.2f Gops/s" % result_dict["compute_workload"]
print(msg)
else:
raise Exception("Unknown output_type " + self.output_type)
@contextlib.contextmanager
def cuda_pointwise_context(loop_levels, block_count, block_size):
if loop_levels:
old_loop_levels = torch._C._jit_get_te_cuda_pointwise_loop_levels()
torch._C._jit_set_te_cuda_pointwise_loop_levels(loop_levels)
if block_count:
old_block_count = torch._C._jit_get_te_cuda_pointwise_block_count()
torch._C._jit_set_te_cuda_pointwise_block_count(block_count)
if block_size:
old_block_size = torch._C._jit_get_te_cuda_pointwise_block_size()
torch._C._jit_set_te_cuda_pointwise_block_size(block_size)
yield
if loop_levels:
torch._C._jit_set_te_cuda_pointwise_loop_levels(old_loop_levels)
if block_count:
torch._C._jit_set_te_cuda_pointwise_block_count(old_block_count)
if block_size:
torch._C._jit_set_te_cuda_pointwise_block_size(old_block_size)
# Auxiliary class to facilitate dynamic input shape
class DynamicShape(object):
r'''
An Auxiliary class for dynamic shape benchmarks
Pre-computes input with random shapes and also
modifies the compute method so in each call the
fuser sees a different input tensor shape
'''
# Number of random inputs in an instance
SAMPLE_SIZE = 100
def __init__(self, dynamic_range=1.2):
self._input_samples = []
self._input_sample_index = 0
self._dynamic_range = 1. / dynamic_range if dynamic_range > 1.0 else dynamic_range
self._enable_dynamic_shapes = True
# Returns the input test case that current index points to
@property
def inputs(self):
return self._input_samples[self._input_sample_index]
# An inputs assignment actually adds a test case in the class buffer
@inputs.setter
def inputs(self, val):
self._input_samples.append(val)
# Runs normal compute while increment test case index
def compute(self):
super().compute()
self._input_sample_index = (self._input_sample_index + 1) % self.SAMPLE_SIZE
# Defined by benchmark, the benchmark needs to specify the input
# tensor construction in this method, essentially the same way
# a benchmark creates the inputs list in the initializer
def instantiate_input(self):
raise NotImplementedError
# Instantiate random shaped inputs and start the benchmark run
def run(self, args):
# force disable dynamic shape from command line
if args.no_dynamic_shape:
self._enable_dynamic_shapes = False
self.load_inputs()
super().run(args)
# pre-compute inputs so the creations of random tensors
# do not add to the compute time
def load_inputs(self):
for i in range(self.SAMPLE_SIZE - 1):
self.instantiate_input()
# returns a randomized shape
def rand_shape(self, shape):
if not self._enable_dynamic_shapes:
return shape
ratios = np.random.uniform(self._dynamic_range, 1.0, len(shape))
dyn_shape = list(
np.multiply(shape, ratios).astype(int)
)
return dyn_shape
benchmark_classes = []
def register_benchmark_class(benchmark_cls):
benchmark_classes.append(benchmark_cls)
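# --- Illustrative sketch (not part of the original file) -----------------------
# The minimal surface a concrete benchmark provides, following the abstract
# methods above; the op, sizes and workload numbers are made up, and the class
# is deliberately not registered so it does not join the real suite.
class _ExampleAddBench(Benchmark):
    def __init__(self, mode, device, dtype, N):
        super().__init__(mode, device, dtype)
        self.N = N
        self.inputs = [self.rand([N], device=device, dtype=dtype, requires_grad=self.requires_grad)]
    def forward(self, data):
        return data + 1.0
    def config(self):
        return [self.N]
    @staticmethod
    def module():
        return "example_add"
    def memory_workload(self):
        # one read plus one write of N elements
        return {"sol": 2 * self.N, "algorithmic": 2 * self.N}
    @staticmethod
    def default_configs():
        return [[1 << 20]]
# register_benchmark_class(_ExampleAddBench) would add it to benchmark_classes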
| pytorch-master | benchmarks/tensorexpr/benchmark.py |
from . import benchmark
import torch
class RNNEltwise(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, hs):
super().__init__(mode, device, dtype)
self.b = b
self.hs = hs
self.input = self.rand(
[b, 4 * hs], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.hx = self.rand(
[b, 4 * hs], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.cx = self.rand(
[b, hs], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.b_ih = self.rand(
[b, 4 * hs], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.b_hh = self.rand(
[b, 4 * hs], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.inputs = [
self.input,
self.hx,
self.cx,
self.b_ih,
self.b_hh,
]
def forward(self, input, hx, cx, b_ih, b_hh):
gates = input + hx + b_ih + b_hh
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
def config(self):
return [self.b, self.hs]
@staticmethod
def module():
return "rnn_eltwise"
def memory_workload(self):
def memsize(t):
return t.numel() * t.element_size()
input_size = sum([memsize(t) for t in self.inputs])
output_size = 2 * memsize(self.cx)
io_size = input_size + output_size
return {"sol": io_size, "algorithmic": io_size}
@staticmethod
def default_configs():
return [[64, 512]]
benchmark.register_benchmark_class(RNNEltwise)
class DynamicLSTM(benchmark.DynamicShape, RNNEltwise):
def __init__(self, mode, device, dtype, b, hs):
benchmark.DynamicShape.__init__(self)
RNNEltwise.__init__(self, mode, device, dtype, b, hs)
def instantiate_input(self):
b, hs = self.rand_shape([self.b, self.hs])
self.input = self.rand(
[b, 4 * hs], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad
)
self.hx = self.rand(
[b, 4 * hs], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad
)
self.cx = self.rand(
[b, hs], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad
)
self.b_ih = self.rand(
[b, 4 * hs], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad
)
self.b_hh = self.rand(
[b, 4 * hs], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad
)
self.inputs = [
self.input,
self.hx,
self.cx,
self.b_ih,
self.b_hh,
]
@staticmethod
def module():
return "dynamic_lstm"
benchmark.register_benchmark_class(DynamicLSTM)
| pytorch-master | benchmarks/tensorexpr/rnn_eltwise.py |
from . import benchmark
class ReduceBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, case, M, N, K, skip_input_transform):
super().__init__(mode, device, dtype)
self.case = case
self.M = M
self.N = N
self.K = K
self._set_skip_input_transform(skip_input_transform)
self.inputs = [self.randn(
[M, N, K], device=device, dtype=dtype, requires_grad=self.requires_grad
)]
if case == "row":
self.dims = [1, 2]
elif case == "mid":
self.dims = [0, 2]
elif case == "col":
self.dims = [0, 1]
elif case == "full":
self.dims = [0, 1, 2]
else:
raise ValueError("invalid case: %s" % case)
def forward(self, inputs):
if self.skip_input_transform:
x = inputs
else:
x = self.add(inputs, 0.001)
y = self.sum(x, self.dims)
return y
def config(self):
if self.case == "full":
return [self.M * self.N * self.K, self._skip_input_transform_str()]
return [self.M, self.N, self.K, self._skip_input_transform_str()]
@staticmethod
def default_configs():
return [
# [512, 512, 512],
[512, 64, 512, "s0"],
]
@staticmethod
def module():
return "reduce"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1
algorithmic_count = 1
else:
sol_count = (1) + (1)
algorithmic_count = 1 + 1
buffer_size = self.M * self.N * self.K
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
def _set_skip_input_transform(self, input_str):
# In the test setting, s1 will skip the input transformation, and s0 will not.
if input_str == "s0":
self.skip_input_transform = False
elif input_str == "s1":
self.skip_input_transform = True
else:
raise ValueError('invalid skip_input_transform: %s' % (input_str))
def _skip_input_transform_str(self):
if self.skip_input_transform:
return "s1"
else:
return "s0"
class ReduceRowBench(ReduceBench):
def __init__(self, mode, device, dtype, M, N, K, skip_input_transform):
super(ReduceRowBench, self).__init__(mode, device, dtype, "row", M, N, K, skip_input_transform)
@staticmethod
def module():
return "reduce_row"
class ReduceMidBench(ReduceBench):
def __init__(self, mode, device, dtype, M, N, K, skip_input_transform):
super(ReduceMidBench, self).__init__(mode, device, dtype, "mid", M, N, K, skip_input_transform)
@staticmethod
def module():
return "reduce_mid"
class ReduceColBench(ReduceBench):
def __init__(self, mode, device, dtype, M, N, K, skip_input_transform):
super(ReduceColBench, self).__init__(mode, device, dtype, "col", M, N, K, skip_input_transform)
@staticmethod
def module():
return "reduce_col"
class ReduceFullBench(ReduceBench):
def __init__(self, mode, device, dtype, M, skip_input_transform):
super(ReduceFullBench, self).__init__(mode, device, dtype, "full", M, 1, 1, skip_input_transform)
def config(self):
return [self.M * self.N * self.K, self._skip_input_transform_str()]
@staticmethod
def default_configs():
return [
[1 << 24, "s1"],
]
@staticmethod
def module():
return "reduce_full"
class Reduce2DBench(benchmark.Benchmark):
'''
A benchmark class to validate 2 dimensional reduction performance.
Only a simple add is fused to induce the fuser and isolate reduction perf.
'''
def __init__(self, mode, device, dtype, red_dim, dim0, dim1):
super().__init__(mode, device, dtype)
self.red_dim = red_dim
self.dim0 = dim0
self.dim1 = dim1
self.inputs = [self.randn(
[dim0, dim1], device=device, dtype=dtype, requires_grad=self.requires_grad
)]
if red_dim != 0 and red_dim != 1 :
raise ValueError("invalid reduction dimension: {}".format(red_dim))
def forward(self, inputs):
x = self.add(inputs, 0.001)
y = self.sum(x, [self.red_dim])
return y
def config(self):
return [self.red_dim, self.dim0, self.dim1]
@staticmethod
def default_configs():
return [
[1, 640, 524288],
]
@staticmethod
def module():
return "reduce2d"
@staticmethod
def input_iterable() :
return True
def memory_workload(self):
assert self.mode == "fwd", "Only the forward operation is modeled!"
buffer_size = self.dim0 * self.dim1
if self.red_dim == 0 :
buffer_size += self.dim1
else :
buffer_size += self.dim0
return {
"sol": buffer_size,
"algorithmic": buffer_size,
}
class Reduce2DInnerBench(Reduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super(Reduce2DInnerBench, self).__init__(mode, device, dtype, 1, dim0, dim1)
@staticmethod
def default_configs():
parent_config = Reduce2DBench.default_configs()[0]
return [parent_config[1:]]
def config(self):
parent_config = super(Reduce2DInnerBench, self).config()
return parent_config[1:]
@staticmethod
def module():
return "reduce2d_inner"
class Reduce2DOuterBench(Reduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super(Reduce2DOuterBench, self).__init__(mode, device, dtype, 0, dim0, dim1)
@staticmethod
def default_configs():
parent_config = Reduce2DBench.default_configs()[0]
return [parent_config[1:]]
def config(self):
parent_config = super(Reduce2DOuterBench, self).config()
return parent_config[1:]
@staticmethod
def module():
return "reduce2d_outer"
benchmark.register_benchmark_class(ReduceRowBench)
benchmark.register_benchmark_class(ReduceMidBench)
benchmark.register_benchmark_class(ReduceColBench)
benchmark.register_benchmark_class(Reduce2DInnerBench)
benchmark.register_benchmark_class(Reduce2DOuterBench)
benchmark.register_benchmark_class(ReduceFullBench)
class DynamicReduce2DBench(benchmark.DynamicShape, Reduce2DBench):
'''
A benchmark class to validate 2 dimensional reduction performance.
Only a simple add is fused to induce the fuser and isolate reduction perf.
'''
def __init__(self, mode, device, dtype, red_dim, dim0, dim1):
benchmark.DynamicShape.__init__(self)
Reduce2DBench.__init__(self, mode, device, dtype, red_dim, dim0, dim1)
def instantiate_input(self):
dim0, dim1 = self.rand_shape([self.dim0, self.dim1])
self.inputs = [self.randn(
[dim0, dim1], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad
)]
@staticmethod
def module():
return "dynamicreduce2d"
class DynamicReduce2DInnerBench(DynamicReduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super().__init__(mode, device, dtype, 1, dim0, dim1)
@staticmethod
def default_configs():
parent_config = DynamicReduce2DBench.default_configs()[0]
return [parent_config[1:]]
def config(self):
parent_config = super(DynamicReduce2DInnerBench, self).config()
return parent_config[1:]
@staticmethod
def module():
return "reduce2d_dynamic_inner"
class DynamicReduce2DOuterBench(DynamicReduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super().__init__(mode, device, dtype, 0, dim0, dim1)
@staticmethod
def default_configs():
parent_config = DynamicReduce2DBench.default_configs()[0]
return [parent_config[1:]]
def config(self):
        parent_config = super(DynamicReduce2DOuterBench, self).config()
return parent_config[1:]
@staticmethod
def module():
return "reduce2d_dynamic_outer"
benchmark.register_benchmark_class(DynamicReduce2DInnerBench)
benchmark.register_benchmark_class(DynamicReduce2DOuterBench)
| pytorch-master | benchmarks/tensorexpr/reduction.py |
from . import benchmark
import numpy as np
import torch
class Concat2D2InputBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
self.I2_D1 = I2_D1
self.I2_D2 = I2_D2
self.concat_dim = concat_dim
self.input1 = self.randn([I1_D1, I1_D2], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.input2 = self.randn([I2_D1, I2_D2], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.input1, self.input2]
def forward(self, input1, input2):
x1 = self.add(input1, 0.00001)
x2 = self.add(input2, 0.00001)
y = self.cat((x1, x2), dim=self.concat_dim)
return y
def reference(self):
        return np.concatenate((self.numpy(self.input1), self.numpy(self.input2)), axis=self.concat_dim)
def config(self):
return [self.I1_D1, self.I1_D2, self.I2_D1, self.I2_D2, self.concat_dim]
@staticmethod
def module():
return "concat2d2input"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1 + 1
algorithmic_count = 3 + 1
else:
sol_count = (1 + 1) + (1 + 1)
algorithmic_count = (3 + 1) + (3 + 1)
buffer_size = self.I1_D1 * self.I1_D2 + self.I2_D1 * self.I2_D2
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [
[1, 160, 1, 14, 1],
[1, 580, 1, 174, 1],
[20, 160, 20, 14, 1],
[20, 580, 20, 174, 1],
[8, 512, 8, 512, 1],
[1 << 13, 1060, 1 << 13, 1040, 1],
[1 << 13, 2000, 1 << 13, 1074, 1],
[1 << 15, 1060, 1 << 15, 2670, 1],
[1 << 15, 5120, 1 << 15, 2512, 1]
]
benchmark.register_benchmark_class(Concat2D2InputBench)
class ConcatGraphOptBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
self.I2_D1 = I2_D1
self.I2_D2 = I2_D2
self.concat_dim = concat_dim
self.input1 = self.randn([I1_D1, I1_D2], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.input2 = self.randn([I2_D1, I2_D2], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.input1, self.input2]
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_cat_wo_conditionals(True)
def forward(self, input1, input2):
x1 = self.add(input1, 0.00001)
x2 = self.add(input2, 0.00001)
y = self.cat((x1, x2), dim=self.concat_dim)
z = self.relu(y)
return z
def reference(self):
        return np.concatenate((self.numpy(self.input1), self.numpy(self.input2)), axis=self.concat_dim)
def config(self):
return [self.I1_D1, self.I1_D2, self.I2_D1, self.I2_D2, self.concat_dim]
@staticmethod
def module():
return "concatGraphOpt"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1 + 1
algorithmic_count = 3 + 1
else:
sol_count = (1 + 1) + (1 + 1)
algorithmic_count = (3 + 1) + (3 + 1)
buffer_size = self.I1_D1 * self.I1_D2 + self.I2_D1 * self.I2_D2
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [
[1 << 13, 1060, 1 << 13, 1040, 1],
[1 << 13, 2000, 1 << 13, 1074, 1],
[1 << 15, 1060, 1 << 15, 2670, 1],
[1 << 15, 5120, 1 << 15, 2512, 1]
]
benchmark.register_benchmark_class(ConcatGraphOptBench)
| pytorch-master | benchmarks/tensorexpr/concat.py |
tensor_engine = None
def unsupported(func):
def wrapper(self):
return func(self)
wrapper.is_supported = False
return wrapper
def is_supported(method):
if hasattr(method, "is_supported"):
return method.is_supported
return True
def set_engine_mode(mode):
global tensor_engine
if mode == "tf":
from . import tf_engine
tensor_engine = tf_engine.TensorFlowEngine()
elif mode == "pt":
from . import pt_engine
tensor_engine = pt_engine.TorchTensorEngine()
elif mode == "topi":
from . import topi_engine
tensor_engine = topi_engine.TopiEngine()
elif mode == "relay":
from . import relay_engine
tensor_engine = relay_engine.RelayEngine()
elif mode == "nnc":
from . import nnc_engine
tensor_engine = nnc_engine.NncEngine()
else:
raise ValueError("invalid tensor engine mode: %s" % (mode))
tensor_engine.mode = mode
def get_engine():
if tensor_engine is None:
raise ValueError("use of get_engine, before calling set_engine_mode is illegal")
return tensor_engine
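# --- Illustrative sketch (not part of the original file) -----------------------
# Hypothetical use of the helpers above: a method wrapped with @unsupported is
# reported as unsupported by is_supported(), while plain methods default to True.
class _ExampleEngine(object):
    def add(self, a, b):
        return a + b
    @unsupported
    def conv2d_layer(self):
        raise NotImplementedError("not implemented in this engine")
# is_supported(_ExampleEngine().add) -> True
# is_supported(_ExampleEngine().conv2d_layer) -> False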
| pytorch-master | benchmarks/tensorexpr/tensor_engine.py |
from . import benchmark
class PoolingBench(benchmark.Benchmark):
def __init__(self, case, mode, device, dtype, kernel_size, N, C, H, W):
        super().__init__(mode, device, dtype)
self.case = case
self.kernel_size = kernel_size
self.N = N
self.C = C
self.H = H
self.W = W
self.data = self.rand(
[N, C, H, W], device=device, dtype=dtype, requires_grad=self.requires_grad
)
def forward(self):
if self.case == "maxpool":
y = self.max_pool2d(self.data, self.kernel_size, stride=1)
elif self.case == "avgpool":
y = self.avg_pool2d(self.data, self.kernel_size, stride=1)
return y
def config(self):
return [self.kernel_size, self.N, self.C, self.H, self.W]
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1 + 1
algorithmic_count = 1 + 1
else:
sol_count = (1 + 1) + (1 + 1)
algorithmic_count = (1 + 1) + (2 + 1)
buffer_size = self.N * self.C * self.H * self.W
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [[3, 16, 32, 256, 256]]
class MaxPoolBench(PoolingBench):
def __init__(self, *args):
super().__init__("maxpool", *args)
@staticmethod
def module():
return "maxpool"
class AvgPoolBench(PoolingBench):
def __init__(self, *args):
super().__init__("avgpool", *args)
@staticmethod
def module():
return "avgpool"
benchmark.register_benchmark_class(MaxPoolBench)
benchmark.register_benchmark_class(AvgPoolBench)
| pytorch-master | benchmarks/tensorexpr/pooling.py |
from . import benchmark
import itertools
import numpy as np
import torch
import scipy.special
# A template class for elementwise operations.
# A derived class will override the class instance to customize its behavior.
class ElementBench(benchmark.Benchmark):
# List of customization class variables.
op_str = None
binary_op_pt_func = None
binary_op_np_func = None
unary_op_pt_func = None
unary_op_np_func = None
split_input = True
def __init__(self, mode, device, dtype, N):
super().__init__(mode, device, dtype)
self.N = N
self.d1 = self.rand([N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.d2 = self.rand([N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.d3 = self.rand([N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.d4 = self.rand([N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.d1, self.d2, self.d3, self.d4]
self.deterministic = "rand" not in self.op_str
def _eval(self, d1, d2, d3, d4, binary_op, unary_op):
if not binary_op:
def binary_op(x, y):
return x + y
if not unary_op:
def unary_op(x):
return x
if self.split_input:
d1 = unary_op(d1)
d2 = unary_op(d2)
d3 = unary_op(d3)
d4 = unary_op(d4)
else:
d2 = unary_op(d1 + 0.001)
d3 = unary_op(d1 + 0.002)
d4 = unary_op(d1 + 0.003)
d1 = unary_op(d1)
a = binary_op(d1, d2)
b = binary_op(d3, d4)
c = a + b
return c
def forward(self, d1, d2, d3, d4):
binary_op = self.__class__.binary_op_pt_func
unary_op = self.__class__.unary_op_pt_func
return self._eval(d1, d2, d3, d4, binary_op, unary_op)
def reference(self):
binary_op = self.__class__.binary_op_np_func
unary_op = self.__class__.unary_op_np_func
[d1, d2, d3, d4] = [self.numpy(d) for d in [self.d1, self.d2, self.d3, self.d4]]
return self._eval(d1, d2, d3, d4, binary_op, unary_op)
def config(self):
return [self.N]
@classmethod
def module(cls):
return "element_" + cls.op_str
def memory_workload(self):
input_count = len(self.inputs)
if self.mode == "fwd":
if self.split_input:
sol_count = input_count + 1
algorithmic_count = input_count + 1
else:
sol_count = 1 + 1
algorithmic_count = 1 + 1
if "rand" in self.op_str:
sol_count = 1
algorithmic_count = 1
else:
if self.split_input:
sol_count = (input_count + 1) + (1 + input_count)
algorithmic_count = (input_count + 1) + ((2 + 1) * input_count)
else:
sol_count = 1 + 1
algorithmic_count = 1 + 1
if "rand" in self.op_str:
sol_count = 1
algorithmic_count = 1
buffer_size = self.N
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [[1 << 25]]
def register_element_ops():
binary_op_list = [
["mul", lambda a, b: a * b],
["add", lambda a, b: a + b],
["sub", lambda a, b: a - b],
["div", lambda a, b: a / (b + 1e-4)],
[
"pow",
lambda a, b: torch.pow(a, b),
lambda a, b: np.power(a, b),
], # no fuson triggered
["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
unary_op_list = [
["erf", lambda x: torch.erf(x), lambda x: scipy.special.erf(x)],
["exp", lambda x: torch.exp(x), lambda x: np.exp(x)],
["sin", lambda x: torch.sin(x), lambda x: np.sin(x)],
["cos", lambda x: torch.cos(x), lambda x: np.cos(x)],
["rand_like", lambda x: torch.rand_like(x), lambda x: np.random.rand(*x.shape)],
]
for split_input, binary_op in itertools.product([True, False], binary_op_list):
# Make a copy of ElementBench
if len(binary_op) == 2:
[op_str, op_pt_func] = binary_op
op_np_func = op_pt_func
elif len(binary_op) == 3:
[op_str, op_pt_func, op_np_func] = binary_op
split_str = "split" if split_input else "shared"
op_str = split_str + "_" + op_str
bm_cls = type("ElementBench_" + op_str, (ElementBench,), {})
bm_cls.op_str = op_str
bm_cls.binary_op_pt_func = op_pt_func
bm_cls.binary_op_np_func = op_np_func
bm_cls.split_input = split_input
benchmark.register_benchmark_class(bm_cls)
for split_input, unary_op in itertools.product([True, False], unary_op_list):
# Make a copy of ElementBench
if len(unary_op) == 2:
[op_str, op_pt_func] = unary_op
op_np_func = op_pt_func
elif len(unary_op) == 3:
[op_str, op_pt_func, op_np_func] = unary_op
split_str = "split" if split_input else "shared"
op_str = split_str + "_" + op_str
bm_cls = type("ElementBench_" + op_str, (ElementBench,), {})
bm_cls.op_str = op_str
bm_cls.unary_op_pt_func = op_pt_func
bm_cls.unary_op_np_func = op_np_func
bm_cls.split_input = split_input
benchmark.register_benchmark_class(bm_cls)
# benchmark.register_benchmark_class(ElementMulBench)
register_element_ops()
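# --- Illustrative sketch (not part of the original file) -----------------------
# What register_element_ops() generates, written out by hand for one case: a
# fresh copy of ElementBench created with type(), with the op-specific class
# attributes filled in afterwards. The "_example" suffix marks it as hypothetical.
_example_cls = type("ElementBench_shared_mul_example", (ElementBench,), {})
_example_cls.op_str = "shared_mul_example"
_example_cls.binary_op_pt_func = lambda a, b: a * b
_example_cls.binary_op_np_func = lambda a, b: a * b
_example_cls.split_input = False
# benchmark.register_benchmark_class(_example_cls) would add it to the suite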
class SimpleElementBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N):
super().__init__(mode, device, dtype)
self.N = N
self.data = self.rand([N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.data]
def forward(self, data):
a = data + 0.001
b = a + 0.002
return b
    def reference(self):
        return self.numpy(self.data) + 0.001 + 0.002
def config(self):
return [self.N]
@staticmethod
def input_iterable():
return True
@classmethod
def module(cls):
return "simple_element"
def memory_workload(self):
input_count = len(self.inputs)
if self.mode == "fwd":
sol_count = 2
algorithmic_count = 2
else:
sol_count = 2
algorithmic_count = 2
buffer_size = self.N
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [[1 << 25]]
benchmark.register_benchmark_class(SimpleElementBench)
class DynamicSimpleElementBench(benchmark.DynamicShape, SimpleElementBench):
def __init__(self, mode, device, dtype, N):
benchmark.DynamicShape.__init__(self)
SimpleElementBench.__init__(self, mode, device, dtype, N)
@classmethod
def module(cls):
return "simple_dynamic_element"
def instantiate_input(self):
N, = self.rand_shape([self.N])
data = self.rand([N], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)
self.inputs = [data]
benchmark.register_benchmark_class(DynamicSimpleElementBench)
| pytorch-master | benchmarks/tensorexpr/elementwise.py |
import torch
import torch._C._te as te
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
class kernel_arena_scope(object):
def __enter__(self):
self.scope = te.KernelScope()
def __exit__(self, typ, val, traceback):
self.scope = None
unary_ops = [
("sin", torch.sin),
("cos", torch.cos),
("tan", torch.tan),
("asin", torch.asin),
("acos", torch.acos),
("atan", torch.atan),
("sinh", torch.sinh),
("cosh", torch.cosh),
("tanh", torch.tanh),
("sigmoid", torch.sigmoid),
("exp", torch.exp),
("expm1", torch.expm1),
("expm1", torch.expm1),
("abs", torch.abs),
("log", torch.log),
("fast_log", torch.log),
("log2", torch.log2),
("log10", torch.log10),
("log1p", torch.log1p),
("erf", torch.erf),
("erfc", torch.erfc),
("sqrt", torch.sqrt),
("rsqrt", torch.rsqrt),
("ceil", torch.ceil),
("floor", torch.floor),
("round", torch.round),
("trunc", torch.trunc),
("lgamma", torch.lgamma),
# ("frac", torch.frac), # seems unimplemented
# ("isnan", torch.isnan), # no out variant
]
def gen_unary_nnc_fun(nnc_name):
def nnc_fun(A, B):
def compute(i, j):
return getattr(A.load([i, j]), nnc_name)()
return compute
return nnc_fun
def gen_unary_torch_fun(torch_op):
def torch_fun(a, b, out):
def fun():
return torch_op(a, out=out)
return fun
return torch_fun
def gen_binary_nnc_fun(fn):
def nnc_fun(A, B):
def compute(i, j):
return fn(A.load([i, j]), B.load([i, j]))
return compute
return nnc_fun
def gen_binary_torch_fun(fn):
def pt_fun(a, b, out):
def fun():
return fn(a, b, out=out)
return fun
return pt_fun
def gen_int_comparison_tensors(N, M):
return (torch.randint(0, 3, (N, M)), torch.randint(0, 3, (N, M)), torch.empty((N, M), dtype=torch.bool))
def gen_float_comparison_tensors(N, M):
return (torch.rand(N, M), torch.rand(N, M), torch.empty((N, M), dtype=torch.bool))
te_bool = te.Dtype.Bool
binary_ops = [
('add', (lambda a, b: a + b), torch.add),
('mul', (lambda a, b: a * b), torch.mul),
('sub', (lambda a, b: a - b), torch.sub),
('div', (lambda a, b: a / b), torch.div),
('eq', (lambda a, b: te.Cast.make(te_bool, a == b)), torch.eq, gen_int_comparison_tensors),
('gt', (lambda a, b: te.Cast.make(te_bool, a > b)), torch.gt, gen_float_comparison_tensors),
('lt', (lambda a, b: te.Cast.make(te_bool, a < b)), torch.lt, gen_float_comparison_tensors),
('gte', (lambda a, b: te.Cast.make(te_bool, a >= b)), torch.greater_equal, gen_float_comparison_tensors),
('lte', (lambda a, b: te.Cast.make(te_bool, a <= b)), torch.less_equal, gen_float_comparison_tensors),
# ('neq', (lambda a, b: a != b), None)), # no one-op equivalent
# ('&', (lambda a, b: a & b), torch.bitwise_and), # requires more work to test
]
def nnc_relu(A, B):
def f(i, j):
return torch._C._te.ifThenElse(A.load([i, j]) < torch._C._te.ExprHandle.float(0),
torch._C._te.ExprHandle.float(0), A.load([i, j]))
return f
def pt_relu(a, b, c):
return torch.relu(a)
custom_ops = [
('relu', nnc_relu, pt_relu),
# ('nnc_mul_relu', nnc_mul_relu, pt_mul_relu)
# ('manual_sigmoid', nnc_manual_sigmoid, lambda a, b, c: torch.sigmoid(a, out=c))
]
def gen_custom_torch_fun(fn):
def pt_fun(a, b, out):
def fun():
return fn(a, b, out)
return fun
return pt_fun
def normalize_benchmarks(ops):
return [i + (None,) if len(i) == 3 else i for i in ops]
names = []
nnc_fns = []
pt_fns = []
shape_fns = []
for nnc_name, pt_op in unary_ops:
names.append(nnc_name)
nnc_fns.append(gen_unary_nnc_fun(nnc_name))
pt_fns.append(gen_unary_torch_fun(pt_op))
shape_fns.append(None)
for name, lmbda, pt_fn, shape_fn in normalize_benchmarks(binary_ops):
names.append(name)
nnc_fns.append(gen_binary_nnc_fun(lmbda))
pt_fns.append(gen_binary_torch_fun(pt_fn))
shape_fns.append(shape_fn)
for name, lmbda, pt_fn, shape_fn in normalize_benchmarks(custom_ops):
names.append(name)
nnc_fns.append(lmbda)
pt_fns.append(gen_custom_torch_fun(pt_fn))
shape_fns.append(shape_fn)
benchmarks = list(zip(names, nnc_fns, pt_fns, shape_fns))
def run_benchmarks(benchmarks, sizes):
df = pd.DataFrame(columns=['name', 'N', 'M', 'nnc_time', 'torch_time', 'ratio'])
with torch.no_grad():
for name, nnc_fun, torch_fun, shape_fn in benchmarks:
for N, M in sizes:
iters = int(1e6 / (N + M))
with kernel_arena_scope():
if shape_fn is None:
tA = torch.rand(M, N).clamp(0.01, 0.99)
tB = torch.rand(M, N).clamp(0.01, 0.99)
tX = torch.empty(M, N)
tR = torch.empty(M, N)
else:
tA, tB, tX = shape_fn(M, N)
tR = tX.clone()
def get_nnc_type(dtype):
if dtype == torch.float:
return torch._C._te.Dtype.Float
elif dtype == torch.long:
return torch._C._te.Dtype.Long
dtype = get_nnc_type(tA.dtype)
dM = torch._C._te.ExprHandle.int(M)
dN = torch._C._te.ExprHandle.int(N)
A = torch._C._te.Placeholder('A', dtype, [dM, dN])
B = torch._C._te.Placeholder('B', dtype, [dM, dN])
dim_args = [torch._C._te.DimArg(*args) for args in [(dM, 'm'), (dN, 'n')]]
compute = nnc_fun(A, B)
X = torch._C._te.Compute('X', dim_args, compute)
loopnest = torch._C._te.LoopNest([X])
loopnest.prepare_for_codegen()
stmt = torch._C._te.simplify(loopnest.root_stmt())
cg = torch._C._te.construct_codegen('llvm', stmt, [torch._C._te.BufferArg(x) for x in [A, B, X]])
# warmup
for _ in range(10):
cg.call([tA, tB, tX])
start = time.time()
for it in range(iters):
cg.call([tA, tB, tX])
time1 = time.time() - start
fn = torch_fun(tA, tB, tR)
# warmup
for _ in range(10):
tR = fn()
start = time.time()
for it in range(iters):
tR = fn()
time2 = time.time() - start
                    new_row = pd.DataFrame([{'name': name, 'N': N, 'M': M, 'nnc_time': time1,
                                             'torch_time': time2, 'ratio': time2 / time1}])
                    df = pd.concat([df, new_row], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
print(name, N, M)
print(time2 / time1, time1, time2)
print()
def check_correctness(a, b):
if not np.allclose(a, b):
print(name)
assert(np.allclose(a, b))
check_correctness(tX, tR)
return df
def dump_plot(df, sizes):
keys = []
vals = []
indexed = df[df['N'] == df['M']]
for index, row in indexed.iterrows():
keys.append(row['name'])
vals.append(row['ratio'])
keys = keys[::len(sizes)]
sns.set(rc={'figure.figsize' : (5.0, len(keys) * 0.5)})
cmap = sns.diverging_palette(10, 120, n=9, as_cmap=True)
np_vals = np.array([vals]).reshape(-1, len(sizes))
g = sns.heatmap(np_vals, annot=True, cmap=cmap, center=1.0, yticklabels=True)
plt.yticks(rotation=0)
plt.title('PyTorch performance divided by NNC performance (single core)')
plt.xlabel('Size of NxN matrix')
plt.ylabel('Operation')
g.set_yticklabels(keys)
g.set_xticklabels(sizes)
plt.savefig('nnc.png')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Runs NNC microbenchmarks')
parser.add_argument('--multi_threaded', action='store_true', help='Run with more than one thread')
args = parser.parse_args()
if not args.multi_threaded:
torch.set_num_threads(1)
sizes = [1, 4, 16, 64, 256, 1024]
df = run_benchmarks(benchmarks, [(i, i) for i in sizes])
dump_plot(df, sizes)
| pytorch-master | benchmarks/tensorexpr/microbenchmarks.py |
import torch
class TorchTensorEngine(object):
def rand(self, shape, device=None, dtype=None, requires_grad=False):
return torch.rand(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def randn(self, shape, device=None, dtype=None, requires_grad=False):
return torch.randn(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def nchw_rand(self, shape, device=None, requires_grad=False):
return self.rand(shape, device=device, requires_grad=requires_grad)
def reset(self, _):
pass
def rand_like(self, v):
return torch.rand_like(v)
def numpy(self, t):
return t.cpu().numpy()
def mul(self, t1, t2):
return t1 * t2
def add(self, t1, t2):
return t1 + t2
def batch_norm(self, data, mean, var, training):
return torch.nn.functional.batch_norm(data, mean, var, training=training)
def instance_norm(self, data):
return torch.nn.functional.instance_norm(data)
def layer_norm(self, data, shape):
return torch.nn.functional.layer_norm(data, shape)
def sync_cuda(self):
torch.cuda.synchronize()
def backward(self, tensors, grad_tensors, _):
torch.autograd.backward(tensors, grad_tensors=grad_tensors)
def sum(self, data, dims):
return torch.sum(data, dims)
def softmax(self, data, dim=None, dtype=None):
return torch.nn.functional.softmax(data, dim, dtype)
def cat(self, inputs, dim=0):
return torch.cat(inputs, dim=dim)
def clamp(self, data, min, max):
return torch.clamp(data, min=min, max=max)
def relu(self, data):
return torch.nn.functional.relu(data)
def tanh(self, data):
return torch.tanh(data)
def max_pool2d(self, data, kernel_size, stride=1):
return torch.nn.functional.max_pool2d(data, kernel_size, stride=stride)
def avg_pool2d(self, data, kernel_size, stride=1):
return torch.nn.functional.avg_pool2d(data, kernel_size, stride=stride)
def conv2d_layer(self, ic, oc, kernel_size, groups=1):
return torch.nn.Conv2d(ic, oc, kernel_size, groups=groups)
def matmul(self, t1, t2):
return torch.matmul(t1, t2)
def to_device(self, module, device):
return module.to(device)
| pytorch-master | benchmarks/tensorexpr/pt_engine.py |
from . import benchmark
import torch
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.data = self.rand([M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.data]
self.zeros = torch.zeros(M, N, device=device)
self.six = self.zeros + 6.0
self.three = self.zeros + 3.0
self.sixth = self.zeros + 1.0 / 6.0
def forward(self, inp):
y = inp * (torch.min(torch.relu(inp), self.six) + self.three) * self.sixth
return y
def reference(self):
return self.numpy(self.forward(self.data))
def config(self):
return [self.M, self.N]
@staticmethod
def module():
return "swish"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1 + 1
algorithmic_count = 3 + 1
else:
sol_count = (1 + 1) + (1 + 1)
algorithmic_count = (3 + 1) + (3 + 1)
buffer_size = self.M * self.N
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [[128, 1 << 16]]
benchmark.register_benchmark_class(SwishBench)
| pytorch-master | benchmarks/tensorexpr/swish.py |
from . import benchmark
class ConvImplBench(benchmark.Benchmark):
def __init__(self, case, mode, device, dtype, kernel_size, N, iC, H, W, oC):
super().__init__(mode, device, dtype)
self.case = case
self.kernel_size = kernel_size
self.N = N
self.iC = iC
self.H = H
self.W = W
self.oC = oC
self.data = self.rand(
[N, iC, H, W], device=device, requires_grad=self.requires_grad
)
if case == "conv":
self.groups = 1
elif case == "depthwise_conv":
self.groups = iC
else:
raise ValueError("invalid case: %s" % (case))
self.conv = self.conv2d_layer(iC, oC, kernel_size, groups=self.groups)
if device != "cpu":
self.to_device(self.conv, device)
def forward(self):
y = self.conv(self.data)
return y
def config(self):
return [self.kernel_size, self.N, self.iC, self.H, self.W, self.oC]
def memory_workload(self):
if self.mode == "fwd":
sol_count = {"i": 1, "o": 1, "k": 1}
algorithmic_count = {"i": 1, "o": 1, "k": 1}
else:
sol_count = {"i": 1 + 1, "o": 1 + 1, "k": 1 + 1}
algorithmic_count = {"i": 1 + (1 + 1), "o": 1 + (1 + 1), "k": 1 + (1 + 1)}
buffer_size = {
"i": self.N * self.iC * self.H * self.W,
"o": self.N * self.oC * self.H * self.W,
"k": self.oC
* (self.iC / self.groups)
* self.kernel_size
* self.kernel_size,
}
sol_size = 0
algorithmic_size = 0
for key in sol_count:
sol_size += buffer_size[key] * sol_count[key]
algorithmic_size += buffer_size[key] * algorithmic_count[key]
return {"sol": sol_size, "algorithmic": algorithmic_size}
def compute_workload(self):
if self.mode == "fwd":
count = 1
elif self.mode == "both":
count = 1 + (1 + 1)
else:
raise ValueError("invalid mode: %s" % (self.mode))
op_count = (
self.N
* self.iC
/ self.groups
* self.oC
* self.kernel_size
* self.kernel_size
* self.H
* self.W
)
op_count *= 2
return op_count * count
@staticmethod
def default_configs():
return [
[3, 64, 32, 128, 128, 64],
]
class ConvBench(ConvImplBench):
def __init__(self, *args):
super().__init__("conv", *args)
@staticmethod
def module():
return "conv"
class DepthwiseConvBench(ConvImplBench):
def __init__(self, *args):
super().__init__("depthwise_conv", *args)
@staticmethod
def module():
return "depthwise_conv"
benchmark.register_benchmark_class(ConvBench)
benchmark.register_benchmark_class(DepthwiseConvBench)
| pytorch-master | benchmarks/tensorexpr/conv.py |
from . import benchmark
from . import tensor_engine
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
self.H = H
self.W = W
self.data = self.nchw_rand(
[self.N, self.C, self.H, self.W],
device=device, dtype=dtype,
requires_grad=self.requires_grad,
)
self.running_mean = self.rand([self.C], device=device, dtype=dtype)
self.running_var = self.rand([self.C], device=device, dtype=dtype)
self.training = self.mode == "both"
def config(self):
return [self.N, self.C, self.H, self.W]
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1 + 1
algorithmic_count = 2 + 1
else:
sol_count = (1 + 1) + (1 + 1)
algorithmic_count = (2 + 1) + (3 + 1)
buffer_size = self.N * self.C * self.H * self.W * 4
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [[128, 32, 128, 128]]
class BatchNormBench(NormalizationBench):
def forward(self):
y = self.batch_norm(
self.data, self.running_mean, self.running_var, training=self.training
)
return y
@staticmethod
def module():
return "batchnorm"
class InstanceNormBench(NormalizationBench):
def forward(self):
y = self.instance_norm(self.data)
return y
@staticmethod
def module():
return "instance_norm"
def is_supported(self):
return tensor_engine.is_supported(self.instance_norm)
class LayerNormBench(NormalizationBench):
def forward(self):
y = self.layer_norm(self.data, [self.H, self.W])
return y
@staticmethod
def module():
return "layernorm"
benchmark.register_benchmark_class(BatchNormBench)
benchmark.register_benchmark_class(InstanceNormBench)
benchmark.register_benchmark_class(LayerNormBench)
| pytorch-master | benchmarks/tensorexpr/normalization.py |
from . import benchmark
import itertools
import numpy as np
import scipy.special
import torch
class BroadcastMulBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, case, M, N, K):
super().__init__(mode, device, dtype)
self.case = case
self.M = M
self.N = N
self.K = K
if case == "row":
self.d1 = self.rand(
[M, N, 1], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.d2 = self.rand(
[M, 1, K], device=device, dtype=dtype, requires_grad=self.requires_grad
)
elif case == "mid":
self.d1 = self.rand(
[M, N, 1], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.d2 = self.rand(
[1, N, K], device=device, dtype=dtype, requires_grad=self.requires_grad
)
elif case == "col":
self.d1 = self.rand(
[M, 1, K], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.d2 = self.rand(
[1, N, K], device=device, dtype=dtype, requires_grad=self.requires_grad
)
else:
raise ValueError("invalid case: %s" % (case))
self.inputs = [self.d1, self.d2]
def forward(self, d1, d2):
y = d1 + d2
return y
def reference(self):
return self.numpy(self.d1) + self.numpy(self.d2)
def config(self):
return [self.M, self.N, self.K]
@staticmethod
def default_configs():
return [[128, 256, 128]]
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1
algorithmic_count = 1
else:
sol_count = (1) + (1)
algorithmic_count = 1 + (1 + 1)
buffer_size = self.M * self.N * self.K
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
class BroadcastRowBench(BroadcastMulBench):
def __init__(self, mode, device, dtype, M, N, K):
super(BroadcastRowBench, self).__init__(mode, device, dtype, "row", M, N, K)
@staticmethod
def module():
return "broadcast_row"
class BroadcastMidBench(BroadcastMulBench):
def __init__(self, mode, device, dtype, M, N, K):
super(BroadcastMidBench, self).__init__(mode, device, dtype, "mid", M, N, K)
@staticmethod
def module():
return "broadcast_mid"
class BroadcastColBench(BroadcastMulBench):
def __init__(self, mode, device, dtype, M, N, K):
super(BroadcastColBench, self).__init__(mode, device, dtype, "col", M, N, K)
@staticmethod
def module():
return "broadcast_col"
class BroadcastThreeArgs(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N, K, L):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.K = K
self.L = L
self.d1 = self.rand([M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.d2 = self.rand([K, M, 1], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.d3 = self.rand(
[L, K, 1, 1], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.inputs = [self.d1, self.d2, self.d3]
def forward(self, d1, d2, d3):
y = d1 + d2 + d3
return y
def reference(self):
return self.numpy(self.d1) + self.numpy(self.d2) + self.numpy(self.d3)
def config(self):
return [self.M, self.N, self.K, self.L]
@staticmethod
def default_configs():
return [[32, 16, 64, 128]]
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1
algorithmic_count = 1
else:
sol_count = (1) + (1)
algorithmic_count = 1 + (1 + 1 + 1)
buffer_size = self.M * self.N * self.K * self.L * 4
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def module():
return "broadcast_3args"
# benchmark.register_benchmark_class(BroadcastRowBench)
# benchmark.register_benchmark_class(BroadcastMidBench)
# benchmark.register_benchmark_class(BroadcastColBench)
# benchmark.register_benchmark_class(BroadcastThreeArgs)
# TODO: merge this with elementwise bench
# A template class for elementwise operations.
# A derived class overrides these class variables to customize its behavior.
class BroadcastBench(benchmark.Benchmark):
# List of customization class variables.
op_str = None
binary_op_pt_func = None
binary_op_np_func = None
unary_op_pt_func = None
unary_op_np_func = None
split_input = True
def __init__(self, mode, device, dtype, M, N, K):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.K = K
self.d1 = self.rand([M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.d2 = self.rand([K, 1, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.d3 = self.rand([M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.d4 = self.rand([K, M, 1], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.d1, self.d2, self.d3, self.d4]
def _eval(self, d1, d2, d3, d4, binary_op, unary_op):
if not binary_op:
def binary_op(x, y):
return x + y
if not unary_op:
def unary_op(x):
return x
if self.split_input:
d1 = unary_op(d1)
d2 = unary_op(d2)
d3 = unary_op(d3)
d4 = unary_op(d4)
else:
d1, d2, d3, d4 = (
unary_op(d1),
unary_op(d2),
unary_op(d1 + 0.001),
unary_op(d4),
)
a = binary_op(d1, d2)
b = binary_op(d3, d4)
c = a + b
return c
def forward(self, d1, d2, d3, d4):
binary_op = self.__class__.binary_op_pt_func
unary_op = self.__class__.unary_op_pt_func
return self._eval(d1, d2, d3, d4, binary_op, unary_op)
def reference(self):
binary_op = self.__class__.binary_op_np_func
unary_op = self.__class__.unary_op_np_func
[d1, d2, d3, d4] = [self.numpy(d) for d in [self.d1, self.d2, self.d3, self.d4]]
return self._eval(d1, d2, d3, d4, binary_op, unary_op)
def config(self):
return [self.M, self.N, self.K]
@classmethod
def module(cls):
return "broadcast_" + cls.op_str
def memory_workload(self):
input_count = len(self.inputs)
if self.mode == "fwd":
if self.split_input:
sol_count = 1
algorithmic_count = 1
else:
sol_count = 1
algorithmic_count = 1
else:
if self.split_input:
sol_count = 1
algorithmic_count = input_count
else:
sol_count = 1
algorithmic_count = input_count
buffer_size = self.M * self.N * self.K * 4
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [[1 << 8, 1 << 7, 1 << 9]]
def register_broadcast_ops():
binary_op_list = [
["mul", lambda a, b: a * b],
["add", lambda a, b: a + b],
["sub", lambda a, b: a - b],
["div", lambda a, b: a / (b + 1e-4)],
[
"pow",
lambda a, b: torch.pow(a, b),
lambda a, b: np.power(a, b),
        ], # no fusion triggered
["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
unary_op_list = [
["erf", lambda x: torch.erf(x), lambda x: np.erf(x)],
["exp", lambda x: torch.exp(x), lambda x: np.exp(x)],
["sin", lambda x: torch.sin(x), lambda x: np.sin(x)],
["cos", lambda x: torch.cos(x), lambda x: np.cos(x)],
]
for split_input, binary_op in itertools.product([True, False], binary_op_list):
# Make a copy of BroadcastBench
if len(binary_op) == 2:
[op_str, op_pt_func] = binary_op
op_np_func = op_pt_func
elif len(binary_op) == 3:
[op_str, op_pt_func, op_np_func] = binary_op
split_str = "split" if split_input else "shared"
op_str = split_str + "_" + op_str
bm_cls = type("BroadcastBench_" + op_str, (BroadcastBench,), {})
bm_cls.op_str = op_str
bm_cls.binary_op_pt_func = op_pt_func
bm_cls.binary_op_np_func = op_np_func
bm_cls.split_input = split_input
benchmark.register_benchmark_class(bm_cls)
for split_input, unary_op in itertools.product([True, False], unary_op_list):
# Make a copy of BroadcastBench
if len(unary_op) == 2:
[op_str, op_pt_func] = unary_op
op_np_func = op_pt_func
elif len(unary_op) == 3:
[op_str, op_pt_func, op_np_func] = unary_op
split_str = "split" if split_input else "shared"
op_str = split_str + "_" + op_str
bm_cls = type("BroadcastBench_" + op_str, (BroadcastBench,), {})
bm_cls.op_str = op_str
bm_cls.unary_op_pt_func = op_pt_func
bm_cls.unary_op_np_func = op_np_func
bm_cls.split_input = split_input
benchmark.register_benchmark_class(bm_cls)
register_broadcast_ops()
| pytorch-master | benchmarks/tensorexpr/broadcast.py |
from . import benchmark
import scipy.special
class SoftmaxBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.dtype = dtype
self.inputs = [self.randn(
[M, N], device=device, dtype=dtype, requires_grad=self.requires_grad
)]
def forward(self, inputs):
x = self.add(inputs, 0.001)
y = self.softmax(x, dim=-1, dtype=self.dtype)
return y
def reference(self):
        # `self.inputs` is a single-element list; convert the tensor itself, not the list.
        return scipy.special.softmax(self.numpy(self.inputs[0]), axis=-1)
def config(self):
return [self.M, self.N]
@staticmethod
def module():
return "softmax"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1 + 1
algorithmic_count = 3 + 1
else:
sol_count = (1 + 1) + (1 + 1)
algorithmic_count = (3 + 1) + (3 + 1)
buffer_size = self.M * self.N
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [
[480, 20],
[1 << 15, 32],
[128, 1 << 16],
]
benchmark.register_benchmark_class(SoftmaxBench)
| pytorch-master | benchmarks/tensorexpr/softmax.py |
import argparse
import itertools
from . import benchmark
import os
from . import tensor_engine
from . import attention # noqa: F401
from . import broadcast # noqa: F401
from . import concat # noqa: F401
# from . import conv # noqa: F401
from . import elementwise # noqa: F401
from . import matmul # noqa: F401
# from . import normalization # noqa: F401
# from . import pooling # noqa: F401
from . import reduction # noqa: F401
from . import softmax # noqa: F401
from . import rnn_eltwise # noqa: F401
from . import swish # noqa: F401
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Benchmark operators in specific shapes.
Works only with Python3.\n A few examples:
* benchmark.py: runs all the default configs with all the benchmarks.
* benchmark.py reduce: runs all the default configs with all benchmarks that have the prefix 'reduce'
* benchmark.py layernorm_fwd_cpu_128_32_128_128: run a particular benchmark in that config""",
)
parser.add_argument(
"benchmark_names",
type=str,
default=None,
nargs="*",
help="name of the benchmark to run",
)
parser.add_argument(
"--device",
type=str,
default="cpu,cuda",
help="a comma separated list of device names",
)
parser.add_argument(
"--mode",
type=str,
default="fwd,both",
help="a comma separated list of running modes",
)
parser.add_argument(
"--dtype",
type=str,
default="float32",
help="a comma separated list of Data Types: {float32[default], float16}",
)
parser.add_argument(
"--input-iter",
type=str,
default=None,
help="a comma separated list of Tensor dimensions that includes a start, \
stop, and increment that can be constant or a power of 2 \
{start:stop:inc,start:stop:pow2}",
)
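    # For example (illustrative values): `--input-iter 64:4096:pow2` expands that
    # dimension to [64, 128, 256, ..., 4096]; comma-separated specs describe
    # successive tensor dimensions, and their cross product is benchmarked
    # (see run_with_input_iter below).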
parser.add_argument(
"--engine",
type=str,
default="pt",
help="the underlying tensor engine. only pt for now",
)
parser.add_argument(
"--jit_mode",
type=str,
default="trace",
help="the jit mode to use: one of {trace, none}",
)
parser.add_argument(
"--cuda_pointwise_loop_levels",
type=int,
default=None,
help="num of loop levesl for Cuda pointwise operations: 2 or 3",
)
parser.add_argument(
"--cuda_pointwise_block_count",
type=int,
default=None,
help="num of block for Cuda pointwise operations",
)
parser.add_argument(
"--cuda_pointwise_block_size",
type=int,
default=None,
help="num of blocks for Cuda pointwise operations",
)
parser.add_argument(
"--cuda_fuser",
type=str,
default="te",
help="The Cuda fuser backend to use: one of {te, nvf, old, none}",
)
parser.add_argument(
"--output",
type=str,
default="stdout",
help="The output format of the benchmark run {stdout[default], json}",
)
parser.add_argument(
"--print-ir",
action='store_true',
help="Print the IR graph of the Fusion.",
)
parser.add_argument(
"--print-kernel",
action='store_true',
help="Print generated kernel(s).",
)
parser.add_argument(
"--no-dynamic-shape",
action='store_true',
help="Disable shape randomization in dynamic benchmarks.",
)
parser.add_argument(
"--cpu_fusion",
default=False,
action='store_true',
help="Enable CPU fusion.",
)
parser.add_argument(
"--cat_wo_conditionals",
default=False,
action='store_true',
help="Enable CAT wo conditionals.",
)
args = parser.parse_args()
if args.cuda_fuser == "te":
import torch
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._get_graph_executor_optimize(True)
elif args.cuda_fuser == "old":
import torch
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_override_can_fuse_on_gpu(True)
elif args.cuda_fuser == "nvf":
import torch
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
torch._C._get_graph_executor_optimize(True)
else :
raise ValueError("Undefined fuser: {}".format(args.cuda_fuser))
if args.cpu_fusion:
import torch
torch._C._jit_override_can_fuse_on_cpu(True)
else:
import torch
torch._C._jit_override_can_fuse_on_cpu(False)
if args.cat_wo_conditionals:
import torch
torch._C._jit_cat_wo_conditionals(True)
else:
import torch
torch._C._jit_cat_wo_conditionals(False)
def set_global_threads(num_threads):
os.environ["OMP_NUM_THREADS"] = str(num_threads)
os.environ["MKL_NUM_THREADS"] = str(num_threads)
os.environ["TVM_NUM_THREADS"] = str(num_threads)
os.environ["NNC_NUM_THREADS"] = str(num_threads)
devices = args.device.split(",")
    # accept 'gpu' as an alias for the 'cuda' device
devices = ["cuda" if device == "gpu" else device for device in devices]
cpu_count = 0
for index, device in enumerate(devices):
if device.startswith("cpu"):
cpu_count += 1
if cpu_count > 1:
raise ValueError(
"more than one CPU device is not allowed: %d" % (cpu_count)
)
if device == "cpu":
continue
num_threads_str = device[3:]
try:
# see if the device is in 'cpu1' or 'cpu4' format
num_threads = int(num_threads_str)
set_global_threads(num_threads)
devices[index] = "cpu"
except ValueError:
continue
modes = args.mode.split(",")
datatypes = args.dtype.split(",")
for index, dtype in enumerate(datatypes):
        datatypes[index] = getattr(torch, dtype, None)
        if not datatypes[index]:
            raise AttributeError("DataType: {} is not valid!".format(dtype))
tensor_engine.set_engine_mode(args.engine)
def run_default_configs(bench_cls, allow_skip=True):
for mode, device, dtype, config in itertools.product(
modes, devices, datatypes, bench_cls.default_configs()
):
bench = bench_cls(mode, device, dtype, *config)
bench.output_type = args.output
bench.jit_mode = args.jit_mode
if not bench.is_supported():
if allow_skip:
continue
else:
raise ValueError(
"attempted to run an unsupported benchmark: %s" % (bench.desc())
)
bench.run(args)
def run_with_input_iter(bench_cls, input_iter, allow_skip=True):
tensor_dim_specs = input_iter.split(',')
tensor_dim_specs = [dim.split(':') for dim in tensor_dim_specs]
configs = []
for start, stop, inc in tensor_dim_specs:
dim_list = []
if inc == 'pow2' :
curr = int(start)
while curr <= int(stop) :
dim_list.append(curr)
curr <<= 1
elif inc == 'pow2+1' :
curr = int(start)
while curr <= int(stop) :
dim_list.append(curr)
curr -= 1
curr <<= 1
curr += 1
else :
dim_list = list(range(int(start), int(stop) + int(inc), int(inc)))
configs.append(dim_list)
configs = itertools.product(*configs)
for mode, device, dtype, config in itertools.product(
modes, devices, datatypes, list(configs)
):
bench = bench_cls(mode, device, dtype, *config)
bench.output_type = args.output
bench.jit_mode = args.jit_mode
if not bench.is_supported():
if allow_skip:
continue
else:
raise ValueError(
"attempted to run an unsupported benchmark: %s" % (bench.desc())
)
bench.run(args)
benchmark_classes = benchmark.benchmark_classes
if not args.benchmark_names:
# by default, run all the benchmarks
for benchmark_cls in benchmark_classes:
run_default_configs(benchmark_cls, allow_skip=True)
else:
for name in args.benchmark_names:
# if the name is the prefix of a benchmark class, run all the benchmarks for that class
match_class_name = False
for bench_cls in benchmark_classes:
if name in bench_cls.module():
match_class_name = True
if (args.input_iter is not None) and bench_cls.input_iterable() :
run_with_input_iter(bench_cls, args.input_iter, allow_skip=True)
else :
if args.input_iter is not None :
print("WARNING: Incompatible benchmark class called with input_iter arg: {}".format(name))
run_default_configs(bench_cls, allow_skip=True)
if match_class_name:
continue
# if not a class module, parse the config and call it that way
match_class_name = False
for bench_cls in benchmark_classes:
cls_module = bench_cls.module()
if name.startswith(cls_module):
match_class_name = True
if name[len(cls_module)] != "_":
raise ValueError("invalid name: %s" % (name))
config_str = name[(len(cls_module) + 1) :]
config = config_str.split("_")
if len(config) < 2:
raise ValueError("invalid config: %s" % config)
mode, device = config[0:2]
# TODO: make sure virtual devices such as 'cpu1' and 'cpu4' are supported.
if mode not in ["fwd", "both"]:
raise ValueError("invalid mode: %s" % (mode))
for i, entry in enumerate(config):
try:
value = int(entry)
config[i] = value
except ValueError:
pass
# TODO: output dtype in the config and parse it back from the str
bench = bench_cls(config[0], config[1], torch.float32, *config[2:])
bench.jit_mode = args.jit_mode
bench.output_type = args.output
bench.run(args)
if not match_class_name:
available_classes = ", ".join(
[bench_cls.module() for bench_cls in benchmark_classes]
)
raise ValueError(
"invalid name: %s\nAvailable benchmark classes:\n%s"
% (name, available_classes)
)
if __name__ == "__main__":
main()
| pytorch-master | benchmarks/tensorexpr/__main__.py |
import torch
from pyarkbench import Benchmark, Timer, default_args
use_new = True
class Basic(Benchmark):
def benchmark(self):
x = [torch.ones(200, 200) for i in range(30)]
with Timer() as big1:
torch.save(x, "big_tensor.zip", _use_new_zipfile_serialization=use_new)
with Timer() as big2:
v = torch.load("big_tensor.zip")
x = [torch.ones(10, 10) for i in range(200)]
with Timer() as small1:
torch.save(x, "small_tensor.zip", _use_new_zipfile_serialization=use_new)
with Timer() as small2:
v = torch.load("small_tensor.zip")
return {
"Big Tensors Save": big1.ms_duration,
"Big Tensors Load": big2.ms_duration,
"Small Tensors Save": small1.ms_duration,
"Small Tensors Load": small2.ms_duration,
}
if __name__ == '__main__':
bench = Basic(*default_args.bench())
print("Use zipfile serialization:", use_new)
results = bench.run()
bench.print_stats(results, stats=['mean', 'median'])
| pytorch-master | benchmarks/serialization/simple_measurement.py |
"""Basic runner for the instruction count microbenchmarks.
The contents of this file are placeholders, and will be replaced by more
expressive and robust components (e.g. better runner and result display
components) in future iterations. However, this allows us to exercise the
underlying benchmark generation infrastructure in the meantime.
"""
import argparse
import sys
from typing import List
from applications import ci
from core.expand import materialize
from definitions.standard import BENCHMARKS
from execution.runner import Runner
from execution.work import WorkOrder
def main(argv: List[str]) -> None:
work_orders = tuple(
WorkOrder(label, autolabels, timer_args, timeout=600, retries=2)
for label, autolabels, timer_args in materialize(BENCHMARKS)
)
results = Runner(work_orders).run()
for work_order in work_orders:
print(work_order.label, work_order.autolabels, work_order.timer_args.num_threads, results[work_order].instructions)
if __name__ == "__main__":
modes = {
"debug": main,
"ci": ci.main,
}
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str, choices=list(modes.keys()), default="debug")
args, remaining_args = parser.parse_known_args(sys.argv)
modes[args.mode](remaining_args[1:])
| pytorch-master | benchmarks/instruction_counts/main.py |
| pytorch-master | benchmarks/instruction_counts/core/__init__.py |
"""Type annotations for various benchmark objects."""
from typing import Any, Dict, Optional, Tuple, Union
from core.api import AutoLabels, TimerArgs, GroupedBenchmark
# =============================================================================
# == Benchmark schema =========================================================
# =============================================================================
""" (There is a TL;DR at the end for ad-hoc benchmarks.)
The end state for representing a benchmark is:
```
Tuple[
Tuple[
Tuple[str, ...], # Primary key
core.api.AutoLabels, # Secondary key
core.api.TimerArgs, # Value
],
...
]
```
For example:
```
[
(("pointwise", "add"), AutoLabels(..., Language.PYTHON), TimerArgs(...)),
(("pointwise", "add"), AutoLabels(..., Language.CPP), TimerArgs(...)),
...
]
```
However, such a flat list is somewhat tedious to maintain (and read), because
there is significant duplication in the key structure. So instead, we would
like to define something like:
```
{
"pointwise" : {
"add": {
None: GroupedStmts(...),
"with alpha": GroupedStmts(...),
},
"mul": GroupedStmts(...),
},
"matmul": GroupedStmts(...),
}
```
and then parse out a flat representation. The type declarations below are
simply formalizing the structure of nested dictionaries with string or tuple
of string keys.
TL;DR
If you only care about writing an ad-hoc benchmark for a PR, just use a
flat dictionary and everything will work. For example:
```
{
"case 0": TimerArgs(...),
"case 1": TimerArgs(...),
"case 2": GroupedStmts(...),
...
}
```
"""
# Allow strings in definition for convenience, and None to signify a base
# case. (No subsequent entry needed. See the "add" example above.)
Label = Tuple[str, ...]
_Label = Union[Label, Optional[str]]
# MyPy does not currently support recursive types:
# https://github.com/python/mypy/issues/731
#
# So while the correct type definition would be:
# _Value = Union[
# # Base case:
# Union[TimerArgs, GroupedBenchmark],
#
# # Recursive case:
# Dict[Label, "_Value"],
# ]
# we instead have to use Any and rely on runtime asserts when flattening.
_Value = Union[
Union[TimerArgs, GroupedBenchmark],
Dict[_Label, Any],
]
Definition = Dict[_Label, _Value]
# We initially have to parse (flatten) to an intermediate state in order to
# build TorchScript models since multiple entries will share the same model
# artifact.
FlatIntermediateDefinition = Dict[Label, Union[TimerArgs, GroupedBenchmark]]
# Final parsed schema.
FlatDefinition = Tuple[Tuple[Label, AutoLabels, TimerArgs], ...]
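# A minimal sketch (the labels and stmts are illustrative, not taken from the real
# benchmark definitions): a nested `Definition` such as
#
#     {
#         "pointwise": {
#             "add": GroupedBenchmark.init_from_stmts(py_stmt="z = x + y", ...),
#             "mul": GroupedBenchmark.init_from_stmts(py_stmt="z = x * y", ...),
#         },
#     }
#
# flattens (via core.utils.flatten) to a FlatIntermediateDefinition keyed by
# ("pointwise", "add") and ("pointwise", "mul"); core.expand.materialize then
# produces the final FlatDefinition of (Label, AutoLabels, TimerArgs) tuples.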
| pytorch-master | benchmarks/instruction_counts/core/types.py |
"""Key enums and structs used to handle data flow within the benchmark."""
import dataclasses
import enum
import itertools as it
import re
import textwrap
from typing import Dict, List, Optional, Set, Tuple, Union, TYPE_CHECKING
from worker.main import WorkerTimerArgs
if TYPE_CHECKING:
# Benchmark utils are only partially strict compliant, so MyPy won't follow
# imports using the public namespace. (Due to an exclusion rule in
# mypy-strict.ini)
from torch.utils.benchmark.utils.timer import Language
else:
from torch.utils.benchmark import Language
# Note:
# WorkerTimerArgs is defined in worker.main so that the worker does not
# depend on any files, including core.api. We mirror it with a public symbol
# `TimerArgs` for API consistency.
TimerArgs = WorkerTimerArgs
class RuntimeMode(enum.Enum):
EAGER = "Eager"
JIT = "TorchScript"
EXPLICIT = ""
class AutogradMode(enum.Enum):
FORWARD = "Forward"
FORWARD_BACKWARD = "Forward + Backward"
EXPLICIT = ""
@dataclasses.dataclass(frozen=True)
class AutoLabels:
"""Labels for a TimerArgs instance which are inferred during unpacking."""
runtime: RuntimeMode
autograd: AutogradMode
language: Language
@property
def as_dict(self) -> Dict[str, str]:
"""Dict representation for CI reporting."""
return {
"runtime": self.runtime.value,
"autograd": self.autograd.value,
"language": "Python" if self.language == Language.PYTHON else "C++",
}
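    # For example (illustrative), AutoLabels(RuntimeMode.EAGER, AutogradMode.FORWARD,
    # Language.PYTHON).as_dict evaluates to:
    #     {"runtime": "Eager", "autograd": "Forward", "language": "Python"}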
@dataclasses.dataclass(frozen=True)
class GroupedSetup:
py_setup: str = ""
cpp_setup: str = ""
global_setup: str = ""
def __post_init__(self) -> None:
for field in dataclasses.fields(self):
assert field.type == str
value: str = getattr(self, field.name)
object.__setattr__(self, field.name, textwrap.dedent(value))
@dataclasses.dataclass(frozen=True)
class GroupedBenchmark:
"""Base class for defining groups of benchmarks.
Concrete interfaces:
- `core.api.GroupedStmts` (init_from_stmts)
- `core.api.GroupedModules` (init_from_model)
- `core.api.GroupedVariants` (init_from_variants)
There are a variety of dimensions along which one might wish to measure
PyTorch performance:
- Python, C++
- Eager, TorchScript
- Single threaded, multi threaded
- Training, inference
It is useful to define them together, both for clear, concise benchmark
definition and more intelligent post processing and analysis.
There are also two programming idioms in PyTorch. One is to write free form
code (so-called "NumPy with gradients"), and the other is to organize code
using `torch.nn.Module`s. (This is how common neural network layers are
exposed through the PyTorch API.) To support easy definition two simple
initialization methods are provided:
- `init_from_stmts`
- `init_from_model`
Those methods will document their unique constructor arguments, however
most are shared and are defined here:
setup: Defines how to initialize a benchmark in both Python and C++.
signature:
A string of the form:
```
f(a, b, ...) -> c
```
For instance, if Python setup is:
```
x = torch.ones((2,), requires_grad=True)
y = torch.ones((2,))
```
and the corresponding stmt is:
```
z = torch.dot(x, y)
```
Then the signature is `f(x, y) -> z`. `signature` is required any
time we need to generate part of a snippet:
- When calling an opaque model provided by `init_from_models`
- When `torchscript=True`
- When `autograd=True`
If a return value is not needed (e.g. because of in place mutation)
then `-> None` is valid, but a non-None return must be provided if
`autograd=True`
torchscript:
If True, also JIT the stmt or model and generate benchmarks which
call the scripted version. Requires that `signature` is defined.
autograd:
If True, generate both forward and forward + backward benchmarks.
Requires that `signature` is defined, and return value is not None.
num_threads:
Maps to the Timer arg. If a tuple of ints is provided, benchmarks
will be generated for each value.
A third method, `init_from_variants`, is provided to define several related
benchmarks at once.
"""
# These are the stmts which are actually executed by Timer. In the case of
# `GroupedStmts` (init_from_stmts) they are passed through from user args.
# In the case of `GroupedModules` (init_from_model) they are generated
# using `signature`. (e.g. `f(x, y) -> z` generates `z = model(x, y)`)
py_fwd_stmt: Optional[str]
cpp_fwd_stmt: Optional[str]
# Code block used to define a model. `init_from_stmts` will never populate
# `cpp_model_setup`, but if TorchScript is requested it will generate
# `py_model_setup` using `torch.jit.script`.
py_model_setup: Optional[str]
cpp_model_setup: Optional[str]
# True if this benchmark used `init_from_stmts`, otherwise False.
inferred_model_setup: bool
# Described above
setup: GroupedSetup
signature_args: Optional[Tuple[str, ...]]
signature_output: Optional[str]
torchscript: bool
autograd: bool
num_threads: Tuple[int, ...]
@classmethod
def init_from_stmts(
cls,
py_stmt: Optional[str] = None,
cpp_stmt: Optional[str] = None,
# Generic constructor arguments
setup: GroupedSetup = GroupedSetup(),
signature: Optional[str] = None,
torchscript: bool = False,
autograd: bool = False,
num_threads: Union[int, Tuple[int, ...]] = 1,
) -> "GroupedBenchmark":
"""Create a set of benchmarks from free-form statements.
This method of benchmark definition is analogous to Timer use, where
we simply execute the provided stmts.
"""
if py_stmt is not None:
py_stmt = textwrap.dedent(py_stmt)
if cpp_stmt is not None:
cpp_stmt = textwrap.dedent(cpp_stmt)
signature_args, signature_output = cls._parse_signature(signature)
py_model_setup = (
cls._model_from_py_stmt(
py_stmt=py_stmt,
signature_args=signature_args,
signature_output=signature_output
) if torchscript else None
)
return cls(
py_fwd_stmt=py_stmt,
cpp_fwd_stmt=cpp_stmt,
py_model_setup=py_model_setup,
cpp_model_setup=None,
inferred_model_setup=True,
setup=setup,
signature_args=signature_args,
signature_output=signature_output,
torchscript=torchscript,
autograd=autograd,
num_threads=(num_threads,) if isinstance(num_threads, int) else num_threads,
)
@classmethod
def init_from_model(
cls,
py_model_setup: Optional[str] = None,
cpp_model_setup: Optional[str] = None,
# Generic constructor arguments
setup: GroupedSetup = GroupedSetup(),
signature: Optional[str] = None,
torchscript: bool = False,
autograd: bool = False,
num_threads: Union[int, Tuple[int, ...]] = 1,
) -> "GroupedBenchmark":
"""Create a set of benchmarks using torch.nn Modules.
This method of benchmark creation takes setup code, and then calls
a model rather than a free form block of code. As a result, there are
two additional requirements compared to `init_from_stmts`:
- `signature` must be provided.
- A model (named "model") must be defined, either with `model = ...`
or `def model(...): ...` in Python or `auto model = ...` in C++.
"""
signature_args, signature_output = cls._parse_signature(signature)
if signature_args is None:
raise ValueError("signature is needed when initializing from model definitions.")
return cls(
*cls._make_model_invocation(signature_args, signature_output, RuntimeMode.EAGER),
py_model_setup=py_model_setup,
cpp_model_setup=cpp_model_setup,
inferred_model_setup=False,
setup=setup,
signature_args=signature_args,
signature_output=signature_output,
torchscript=torchscript,
autograd=autograd,
num_threads=(num_threads,) if isinstance(num_threads, int) else num_threads,
)
@classmethod
def init_from_variants(
cls,
py_block: str = "",
cpp_block: str = "",
num_threads: Union[int, Tuple[int, ...]] = 1,
) -> Dict[Union[Tuple[str, ...], Optional[str]], "GroupedBenchmark"]:
py_cases, py_setup, py_global_setup = cls._parse_variants(py_block, Language.PYTHON)
cpp_cases, cpp_setup, cpp_global_setup = cls._parse_variants(cpp_block, Language.CPP)
assert not py_global_setup
setup = GroupedSetup(
py_setup=py_setup,
cpp_setup=cpp_setup,
global_setup=cpp_global_setup,
)
# NB: The key is actually `Tuple[str, ...]`, however MyPy gets confused
        # and we use the superset `Union[Tuple[str, ...], Optional[str]]` to
# match the expected signature.
variants: Dict[Union[Tuple[str, ...], Optional[str]], GroupedBenchmark] = {}
seen_labels: Set[str] = set()
for label in it.chain(py_cases.keys(), cpp_cases.keys()):
if label in seen_labels:
continue
seen_labels.add(label)
py_lines = py_cases.get(label, [])
cpp_lines = cpp_cases.get(label, [])
n_lines = max(len(py_lines), len(cpp_lines))
py_lines += [""] * (n_lines - len(py_lines))
cpp_lines += [""] * (n_lines - len(cpp_lines))
lines = [
(py_stmt, cpp_stmt)
for py_stmt, cpp_stmt in zip(py_lines, cpp_lines)
if py_stmt or cpp_stmt
]
for i, (py_stmt, cpp_stmt) in enumerate(lines):
case = (f"Case: {i:>2}",) if len(lines) > 1 else ()
variants[(label,) + case] = GroupedBenchmark.init_from_stmts(
py_stmt=py_stmt or None,
cpp_stmt=cpp_stmt or None,
setup=setup,
num_threads=num_threads,
)
return variants
def __post_init__(self) -> None:
if self.autograd and self.signature_output is None:
raise ValueError("An output variable must be specified when `autograd=True`.")
if self.py_model_setup and "model" not in self.py_model_setup:
raise ValueError("`py_model_setup` appears to be missing `model` definition.")
if self.cpp_model_setup and "model" not in self.cpp_model_setup:
raise ValueError("`cpp_model_setup` appears to be missing `model` definition.")
# =========================================================================
# == String manipulation methods ==========================================
# =========================================================================
@staticmethod
def _parse_signature(
signature: Optional[str]
) -> Tuple[Optional[Tuple[str, ...]], Optional[str]]:
if signature is None:
return None, None
match = re.search(r"^f\((.*)\) -> (.*)$", signature)
if match is None:
raise ValueError(f"Invalid signature: `{signature}`")
args: Tuple[str, ...] = tuple(match.groups()[0].split(", "))
output: str = match.groups()[1].strip()
if "," in output:
raise ValueError(f"Multiple return values are not currently allowed: `{output}`")
if output == "None":
return args, None
return args, output
@staticmethod
def _model_from_py_stmt(
py_stmt: Optional[str],
signature_args: Optional[Tuple[str, ...]],
signature_output: Optional[str],
) -> str:
if py_stmt is None:
raise ValueError("`py_stmt` must be defined in order to derive a model.")
if signature_args is None:
raise ValueError("signature is needed in order to derive a model.")
return textwrap.dedent(f"""\
def model({', '.join(signature_args)}):
{{stmt_str}}
return {signature_output}
""").format(stmt_str=textwrap.indent(py_stmt, ' ' * 4))
@staticmethod
def _make_model_invocation(
signature_args: Tuple[str, ...],
signature_output: Optional[str],
runtime: RuntimeMode,
) -> Tuple[str, str]:
py_prefix, cpp_prefix = "", ""
if signature_output is not None:
py_prefix = f"{signature_output} = "
cpp_prefix = f"auto {signature_output} = "
if runtime == RuntimeMode.EAGER:
model_name = "model"
cpp_invocation = f"{cpp_prefix}{model_name}->forward({', '.join(signature_args)});"
else:
assert runtime == RuntimeMode.JIT
model_name = "jit_model"
cpp_invocation = textwrap.dedent(f"""\
std::vector<torch::jit::IValue> ivalue_inputs({{
{', '.join([f'torch::jit::IValue({a})' for a in signature_args])}
}});
{cpp_prefix}{model_name}.forward(ivalue_inputs);
""")
# NB:
# In python we invoke __call__, however C++ doesn't have an analogous
# method so we invoke `forward` instead. This means that that Python
# is doing extra work (e.g. checking hooks) compared to C++; however
# because this is the default user experience that's acceptable.
py_invocation = f"{py_prefix}{model_name}({', '.join(signature_args)})"
return py_invocation, cpp_invocation
@staticmethod
def _parse_variants(block: str, language: Language) -> Tuple[Dict[str, List[str]], str, str]:
block = textwrap.dedent(block).strip()
comment = "#" if language == Language.PYTHON else "//"
label_pattern = f"{comment} @(.+)$"
label = ""
lines_by_label: Dict[str, List[str]] = {"SETUP": [], "GLOBAL_SETUP": []}
for line in block.splitlines(keepends=False):
match = re.search(label_pattern, line.strip())
if match:
label = match.groups()[0]
if label.replace(" ", "_").upper() in ("SETUP", "GLOBAL_SETUP"):
label = label.replace(" ", "_").upper()
continue
lines_by_label.setdefault(label, [])
if line.startswith(comment):
line = ""
lines_by_label[label].append(line)
setup = "\n".join(lines_by_label.pop("SETUP"))
global_setup = "\n".join(lines_by_label.pop("GLOBAL_SETUP"))
return lines_by_label, setup, global_setup
# These are the user facing APIs.
GroupedStmts = GroupedBenchmark.init_from_stmts
GroupedModules = GroupedBenchmark.init_from_model
GroupedVariants = GroupedBenchmark.init_from_variants
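# A minimal sketch of how these constructors are typically used (the names `x`, `y`,
# `z` and the sizes are illustrative, not part of this module):
#
#     example = GroupedStmts(
#         py_stmt="z = torch.dot(x, y)",
#         cpp_stmt="auto z = torch::dot(x, y);",
#         setup=GroupedSetup(
#             py_setup="x = torch.ones((8,))\ny = torch.ones((8,))",
#             cpp_setup="auto x = torch::ones({8});\nauto y = torch::ones({8});",
#         ),
#         signature="f(x, y) -> z",
#         torchscript=True,
#         autograd=False,
#         num_threads=(1, 2),
#     )
#
# `signature` is what lets the TorchScript variants generate `z = jit_model(x, y)`
# style invocations; see core.expand for how a grouped definition is split into
# individual TimerArgs.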
| pytorch-master | benchmarks/instruction_counts/core/api.py |
import atexit
import shutil
import re
import textwrap
from typing import List, Optional, Tuple
from torch.utils.benchmark import _make_temp_dir
from core.api import GroupedBenchmark, TimerArgs
from core.types import Definition, FlatIntermediateDefinition, Label
_TEMPDIR: Optional[str] = None
def get_temp_dir() -> str:
global _TEMPDIR
if _TEMPDIR is None:
_TEMPDIR = _make_temp_dir(prefix="instruction_count_microbenchmarks", gc_dev_shm=True)
atexit.register(shutil.rmtree, path=_TEMPDIR)
return _TEMPDIR
def _flatten(
key_prefix: Label,
sub_schema: Definition,
result: FlatIntermediateDefinition
) -> None:
for k, value in sub_schema.items():
if isinstance(k, tuple):
assert all(isinstance(ki, str) for ki in k)
key_suffix: Label = k
elif k is None:
key_suffix = ()
else:
assert isinstance(k, str)
key_suffix = (k,)
key: Label = key_prefix + key_suffix
if isinstance(value, (TimerArgs, GroupedBenchmark)):
assert key not in result, f"duplicate key: {key}"
result[key] = value
else:
assert isinstance(value, dict)
_flatten(key_prefix=key, sub_schema=value, result=result)
def flatten(schema: Definition) -> FlatIntermediateDefinition:
"""See types.py for an explanation of nested vs. flat definitions."""
result: FlatIntermediateDefinition = {}
_flatten(key_prefix=(), sub_schema=schema, result=result)
# Ensure that we produced a valid flat definition.
for k, v in result.items():
assert isinstance(k, tuple)
assert all(isinstance(ki, str) for ki in k)
assert isinstance(v, (TimerArgs, GroupedBenchmark))
return result
def parse_stmts(stmts: str) -> Tuple[str, str]:
"""Helper function for side-by-side Python and C++ stmts.
For more complex statements, it can be useful to see Python and C++ code
side by side. To this end, we provide an **extremely restricted** way
to define Python and C++ code side-by-side. The schema should be mostly
self explanatory, with the following non-obvious caveats:
- Width for the left (Python) column MUST be 40 characters.
- The column separator is " | ", not "|". Whitespace matters.
"""
stmts = textwrap.dedent(stmts).strip()
lines: List[str] = stmts.splitlines(keepends=False)
assert len(lines) >= 3, f"Invalid string:\n{stmts}"
column_header_pattern = r"^Python\s{35}\| C\+\+(\s*)$"
signature_pattern = r"^: f\((.*)\)( -> (.+))?\s*$"
separation_pattern = r"^[-]{40} | [-]{40}$"
code_pattern = r"^(.{40}) \|($| (.*)$)"
column_match = re.search(column_header_pattern, lines[0])
if column_match is None:
raise ValueError(
f"Column header `{lines[0]}` "
f"does not match pattern `{column_header_pattern}`")
assert re.search(separation_pattern, lines[1])
py_lines: List[str] = []
cpp_lines: List[str] = []
for l in lines[2:]:
l_match = re.search(code_pattern, l)
if l_match is None:
raise ValueError(f"Invalid line `{l}`")
py_lines.append(l_match.groups()[0])
cpp_lines.append(l_match.groups()[2] or "")
# Make sure we can round trip for correctness.
l_from_stmts = f"{py_lines[-1]:<40} | {cpp_lines[-1]:<40}".rstrip()
assert l_from_stmts == l.rstrip(), f"Failed to round trip `{l}`"
return "\n".join(py_lines), "\n".join(cpp_lines)
| pytorch-master | benchmarks/instruction_counts/core/utils.py |
"""Logic for converting human-readable benchmarks into executable form.
This is mostly string manipulation, with just a bit of importlib magic.
"""
import importlib.abc
import importlib.util
import itertools as it
import os
import re
import textwrap
from typing import List, Optional, Tuple, TYPE_CHECKING
import uuid
import torch
if TYPE_CHECKING:
# See the note in api.py for why this is necessary.
from torch.utils.benchmark.utils.timer import Language
else:
from torch.utils.benchmark import Language
from core.api import AutogradMode, AutoLabels, GroupedBenchmark, RuntimeMode, TimerArgs
from core.types import FlatDefinition, FlatIntermediateDefinition, Label
from core.utils import get_temp_dir
_ALL_MODES = tuple(it.product(
RuntimeMode,
AutogradMode,
Language,
))
def _generate_torchscript_file(model_src: str, name: str) -> Optional[str]:
"""Returns the path a saved model if one can be constructed from `spec`.
Because TorchScript requires actual source code in order to script a
model, we can't simply `eval` an appropriate model string. Instead, we
must write the correct source to a temporary Python file and then import
the TorchScript model from that temporary file.
`model_src` must contain `jit_model = ...`, which `materialize` will supply.
"""
# Double check.
assert "jit_model = " in model_src, f"Missing jit_model definition:\n{model_src}"
# `torch.utils.benchmark.Timer` will automatically import torch, so we
# need to match that convention.
model_src = f"import torch\n{model_src}"
model_root = os.path.join(get_temp_dir(), "TorchScript_models")
os.makedirs(model_root, exist_ok=True)
module_path = os.path.join(model_root, f"torchscript_{name}.py")
artifact_path = os.path.join(model_root, f"torchscript_{name}.pt")
if os.path.exists(module_path):
# The uuid in `name` should protect against this, but it doesn't hurt
# to confirm.
raise ValueError(f"File {module_path} already exists.")
with open(module_path, "wt") as f:
f.write(model_src)
# Import magic to actually load our function.
module_spec = importlib.util.spec_from_file_location(f"torchscript__{name}", module_path)
assert module_spec is not None
module = importlib.util.module_from_spec(module_spec)
loader = module_spec.loader
assert loader is not None
loader.exec_module(module)
# And again, the type checker has no way of knowing that this line is valid.
jit_model = module.jit_model # type: ignore[attr-defined]
assert isinstance(
jit_model,
(torch.jit.ScriptFunction, torch.jit.ScriptModule)
), f"Expected ScriptFunction or ScriptModule, got: {type(jit_model)}"
jit_model.save(artifact_path)
# Cleanup now that we have the actual serialized model.
os.remove(module_path)
return artifact_path
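# An illustrative `model_src` as assembled by `materialize` below (the dot-product
# body is just an example, not one of the registered benchmarks):
#
#     def model(x, y):
#         z = torch.dot(x, y)
#         return z
#     jit_model = torch.jit.script(model)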
def _get_stmt(
benchmark: GroupedBenchmark,
runtime: RuntimeMode,
autograd: AutogradMode,
language: Language,
) -> Optional[str]:
"""Specialize a GroupedBenchmark for a particular configuration."""
is_python = (language == Language.PYTHON)
# During GroupedBenchmark construction, py_fwd_stmt and cpp_fwd_stmt are
# set to the eager invocation. So in the RuntimeMode.EAGER case we can
# simply reuse them. For the RuntimeMode.JIT case, we need to generate
# an appropriate `jit_model(...)` invocation.
if runtime == RuntimeMode.EAGER:
stmts = (benchmark.py_fwd_stmt, benchmark.cpp_fwd_stmt)
else:
assert runtime == RuntimeMode.JIT
assert benchmark.signature_args is not None
stmts = GroupedBenchmark._make_model_invocation(
benchmark.signature_args, benchmark.signature_output, RuntimeMode.JIT)
stmt = stmts[0 if is_python else 1]
if autograd == AutogradMode.FORWARD_BACKWARD and stmt is not None:
assert benchmark.signature_output is not None
backward = (
f"{benchmark.signature_output}"
# In C++ we have to get the Tensor out of the IValue to call `.backward()`
f"{'.toTensor()' if runtime == RuntimeMode.JIT and language == Language.CPP else ''}"
f".backward(){';' if language == Language.CPP else ''}"
)
stmt = f"{stmt}\n{backward}"
return stmt


def _get_setup(
    benchmark: GroupedBenchmark,
    runtime: RuntimeMode,
    language: Language,
    stmt: str,
    model_path: Optional[str]
) -> str:
    """Specialize a GroupedBenchmark for a particular configuration.

    Setup requires two extra pieces of information:
      1) The benchmark stmt. This is needed to warm up the model and avoid
         measuring lazy initialization.
      2) The model path so we can load it during the benchmark.

    These are only used when `runtime == RuntimeMode.JIT`.
    """
    # By the time we get here, details about how to set up a model have already
    # been determined by GroupedBenchmark. (Or set to None if appropriate.) We
    # simply need to collect and package the code blocks.
    if language == Language.PYTHON:
        setup = benchmark.setup.py_setup
        model_setup = benchmark.py_model_setup
    else:
        assert language == Language.CPP
        setup = benchmark.setup.cpp_setup
        model_setup = benchmark.cpp_model_setup

    if runtime == RuntimeMode.EAGER:
        return "\n".join([setup, model_setup or ""])

    assert runtime == RuntimeMode.JIT
    assert model_path is not None

    # We template `"{model_path}"`, so quotes would break model loading. The
    # model path is generated within the benchmark, so this is just an
    # abundance of caution rather than something that is expected in practice.
    assert '"' not in model_path

    # `stmt` may contain newlines, so we can't use f-strings. Instead we need
    # to generate templates so that dedent works properly.
    if language == Language.PYTHON:
        setup_template: str = textwrap.dedent(f"""
            jit_model = torch.jit.load("{model_path}")

            # Warmup `jit_model`
            for _ in range(3):
            {{stmt}}
        """)
    else:
        assert language == Language.CPP
        setup_template = textwrap.dedent(f"""
            const std::string fpath = "{model_path}";
            auto jit_model = torch::jit::load(fpath);

            // Warmup `jit_model`
            for (int i = 0; i < 3; i++) {{{{
            {{stmt}}
            }}}}
        """)

    model_load = setup_template.format(stmt=textwrap.indent(stmt, ' ' * 4))
    return "\n".join([setup, model_load])


def materialize(benchmarks: FlatIntermediateDefinition) -> FlatDefinition:
    """Convert a heterogeneous benchmark into an executable state.

    This entails generation of TorchScript model artifacts, splitting
    GroupedBenchmarks into multiple TimerArgs, and tagging the results with
    AutoLabels.
    """
    results: List[Tuple[Label, AutoLabels, TimerArgs]] = []

    for label, args in benchmarks.items():
        if isinstance(args, TimerArgs):
            # User provided an explicit TimerArgs, so no processing is necessary.
            auto_labels = AutoLabels(
                RuntimeMode.EXPLICIT,
                AutogradMode.EXPLICIT,
                args.language
            )
            results.append((label, auto_labels, args))
        else:
            assert isinstance(args, GroupedBenchmark)

            model_path: Optional[str] = None
            if args.py_model_setup and args.torchscript:
                model_setup = f"{args.py_model_setup}\njit_model = torch.jit.script(model)"

                # This is just for debugging. We just need a unique name for the
                # model, but embedding the label makes debugging easier.
                name: str = re.sub(r'[^a-z0-9_]', '_', '_'.join(label).lower())
                name = f"{name}_{uuid.uuid4()}"
                model_path = _generate_torchscript_file(model_setup, name=name)

            for (runtime, autograd, language), num_threads in it.product(_ALL_MODES, args.num_threads):
                if runtime == RuntimeMode.EXPLICIT or autograd == AutogradMode.EXPLICIT:
                    continue

                if runtime == RuntimeMode.JIT and not args.torchscript:
                    continue

                if autograd == AutogradMode.FORWARD_BACKWARD and not args.autograd:
                    continue

                stmt = _get_stmt(args, runtime, autograd, language)
                if stmt is None:
                    continue

                setup = _get_setup(args, runtime, language, stmt, model_path)

                global_setup: str = ""
                if language == Language.CPP and runtime == RuntimeMode.JIT:
                    global_setup = textwrap.dedent("""
                        #include <string>
                        #include <vector>
                        #include <torch/script.h>
                    """)

                autolabels = AutoLabels(runtime, autograd, language)
                timer_args = TimerArgs(
                    stmt=stmt,
                    setup=setup,
                    global_setup=global_setup,
                    num_threads=num_threads,
                    language=language,
                )

                results.append((label, autolabels, timer_args))

    return tuple(results)
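
# Hedged usage sketch: `materialize` is the bridge from benchmark definitions
# to concrete Timer invocations. Assuming `flat_definition` is a
# FlatIntermediateDefinition produced elsewhere in this suite:
#
#   for label, autolabels, timer_args in materialize(flat_definition):
#       print(label, autolabels, timer_args.num_threads)
#
# Each GroupedBenchmark may expand into several (label, AutoLabels, TimerArgs)
# entries, one per valid (runtime, autograd, language, num_threads) combination.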
| pytorch-master | benchmarks/instruction_counts/core/expand.py |
"""Run benchmarks while handling parallelism, isolation, and fault tolerance."""
import math
import multiprocessing
import subprocess
import textwrap
import threading
import time
from typing import Dict, List, Optional, Set, Tuple, Union
from execution.work import PYTHON_CMD, SHELL, InProgress, WorkOrder
from worker.main import WorkerFailure, WorkerOutput
CPU_COUNT: int = multiprocessing.cpu_count()
class WorkerFailed(Exception):
"""Raised in the main process when a worker failure is detected."""
def __init__(self, cmd: str, wrapped_trace: Optional[str] = None) -> None:
self.cmd: str = cmd
self.wrapped_trace: Optional[str] = wrapped_trace
super().__init__()
class CorePool:
"""Allocator style helper class to assign individual tasks to a core range.
Pinning tasks to separate cores (or core ranges if `num_threads` > 1)
serves two purposes. First, it prevents the machine from being overloaded,
which can result in OOMs or Callgrind crashes. Second, it helps reduce
noise in the wall times, which are collected as a secondary metric. For
multi-threaded workloads, adjacency is important. Often pairs of cores
share silicon (e.g. cache), while far away cores may lie on separate NUMA
nodes. For this reason, CorePool will only allocate contiguous core ranges.
This falls short of full architecture awareness, and instead tries to find
a balance between rigor and engineering complexity.
"""
def __init__(self, min_core_id: int, max_core_id: int) -> None:
assert min_core_id >= 0
assert max_core_id >= min_core_id
assert max_core_id < CPU_COUNT
self._min_core_id: int = min_core_id
self._max_core_id: int = max_core_id
self._num_cores = max_core_id - min_core_id + 1
print(f"Core pool created: cores {self._min_core_id}-{self._max_core_id}")
self._available: List[bool] = [
True for _ in range(min_core_id, min_core_id + self._num_cores)]
self._reservations: Dict[str, Tuple[int, ...]] = {}
self._lock = threading.Lock()
def reserve(self, n: int) -> Optional[str]:
"""Simple first-fit policy.
If successful, return a string for `taskset`. Otherwise, return None.
"""
with self._lock:
for lower_index in range(self._num_cores - n + 1):
indices = tuple(range(lower_index, lower_index + n))
if all(self._available[i] for i in indices):
for i in indices:
self._available[i] = False
lower_core = indices[0] + self._min_core_id
upper_core = indices[-1] + self._min_core_id
key = f"{lower_core}-{upper_core}" if n > 1 else f"{lower_core}"
self._reservations[key] = indices
return key
return None
def release(self, key: str) -> None:
with self._lock:
for i in self._reservations[key]:
self._available[i] = True
self._reservations.pop(key)
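
# Illustrative sketch of the allocator contract (core counts are hypothetical):
#
#   pool = CorePool(min_core_id=0, max_core_id=7)  # prints "Core pool created: cores 0-7"
#   cpu_list = pool.reserve(2)   # e.g. "0-1", usable as a `taskset --cpu-list` argument
#   single = pool.reserve(1)     # e.g. "2"
#   pool.release(cpu_list)       # cores 0 and 1 become available again
#
# `reserve` returns None when no contiguous block of the requested size is
# free, in which case the caller is expected to retry later.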


class Runner:
    def __init__(
        self,
        work_items: Tuple[WorkOrder, ...],
        core_pool: Optional[CorePool] = None,
        cadence: float = 1.0,
    ) -> None:
        self._work_items: Tuple[WorkOrder, ...] = work_items
        self._core_pool: CorePool = core_pool or CorePool(0, CPU_COUNT - 4)
        self._cadence: float = cadence

        # Working state.
        self._work_queue: List[WorkOrder] = list(work_items)
        self._active_jobs: List[InProgress] = []
        self._results: Dict[WorkOrder, WorkerOutput] = {}

        # Debug information for ETA and error messages.
        self._start_time: float = -1
        self._durations: Dict[WorkOrder, float] = {}
        self._currently_processed: Optional[WorkOrder] = None

        if len(work_items) != len(set(work_items)):
            raise ValueError('Duplicate work items.')

    def run(self) -> Dict[WorkOrder, WorkerOutput]:
        try:
            return self._run()

        except KeyboardInterrupt:
            print("\n\nKeyboardInterrupt (ctrl-c) detected. Shutting down children.")
            self._force_shutdown(verbose=False)
            raise

        except subprocess.TimeoutExpired:
            print("\n\nJob timed out. Shutting down children.")
            self._force_shutdown(verbose=True)
            raise

        except WorkerFailed as e:
            print('Shutting down all outstanding jobs before re-raising.')
            self._force_shutdown(verbose=True)
            print(f"Cmd: {e.cmd}")
            if e.wrapped_trace:
                print(e.wrapped_trace)
            else:
                print('Unknown failure. (Worker did not report exception contents.)')
            raise

        except BaseException:
            print("\n\nUnknown exception. Shutting down jobs before re-raising.")
            self._force_shutdown(verbose=True)
            raise

    def _run(self) -> Dict[WorkOrder, WorkerOutput]:
        self._start_time = time.time()
        self._canary_import()
        while self._work_queue or self._active_jobs:
            t0 = time.time()
            self._update_active_jobs()
            self._enqueue_new_jobs()
            self._print_progress()
            time.sleep(max(self._cadence - (time.time() - t0), 0.0))
        print(f"\nTotal time: {time.time() - self._start_time:.0f} seconds")
        return self._results.copy()

    def _update_active_jobs(self) -> None:
        active_jobs: List[InProgress] = []
        for job in self._active_jobs:
            self._currently_processed = job.work_order
            if not job.check_finished():
                active_jobs.append(job)
                continue

            result: Union[WorkerOutput, WorkerFailure] = job.result
            if isinstance(result, WorkerOutput):
                self._results[job.work_order] = result
                assert job.cpu_list is not None
                self._core_pool.release(job.cpu_list)
                self._durations[job.work_order] = job.duration
            else:
                assert isinstance(result, WorkerFailure)
                raise WorkerFailed(cmd=job.proc.cmd, wrapped_trace=result.failure_trace)
        self._currently_processed = None
        self._active_jobs.clear()
        self._active_jobs.extend(active_jobs)

    def _enqueue_new_jobs(self) -> None:
        work_queue: List[WorkOrder] = []
        for i, work_order in enumerate(self._work_queue):
            self._currently_processed = work_order
            cpu_list = self._core_pool.reserve(work_order.timer_args.num_threads)

            if cpu_list is None:
                work_queue.append(work_order)
            else:
                self._active_jobs.append(InProgress(work_order, cpu_list))

                # Stagger creation. This helps with contention.
                time.sleep(0.5)
        self._currently_processed = None
        self._work_queue.clear()
        self._work_queue.extend(work_queue)

    def _print_progress(self) -> None:
        fraction = f"{len(self._results)} / {len(self._work_items)}"
        elapsed = f"{time.time() - self._start_time:.0f} seconds"
        if len(self._results) < 5:
            eta = "Unknown"
        else:
            remaining = len(self._work_items) - len(self._results)
            iters_remaining = math.ceil(remaining / self._core_pool._num_cores)
            mean_time = sum(self._durations.values()) / len(self._durations)
            eta_minutes = math.ceil(iters_remaining * mean_time / 60)
            eta = f"~{eta_minutes:.0f} minute{'s' if eta_minutes > 1 else ''}"
        print(f"\r{fraction} ({elapsed}), ETA: {eta}", end="")

    def _force_shutdown(self, verbose: bool = False) -> None:
        """Try to interrupt jobs, and kill if need be.

        We would prefer to softly terminate jobs so that they have a chance to
        clean up before shutting down.
        """
        for job in self._active_jobs:
            job.proc.interrupt()

        if verbose and self._currently_processed is not None:
            print(textwrap.dedent(f"""
                Failed when processing the following Job:
                  Label: {self._currently_processed.label}
                  AutoLabels: {self._currently_processed.autolabels}
                  Source cmd: {self._currently_processed.source_cmd}
            """).strip() + "\n")

        if self._active_jobs:
            time.sleep(0.5)

        remaining_jobs = [j for j in self._active_jobs if j.proc.poll() is None]
        if remaining_jobs:
            print(
                f'SIGINT sent to {len(self._active_jobs)} jobs, '
                f'{len(remaining_jobs)} have not yet exited.\n'
                'Entering short cleanup loop, after which stragglers will '
                'be forcibly terminated.'
            )

            for _ in range(5):
                time.sleep(2.0)
                remaining_jobs = [j for j in remaining_jobs if j.proc.poll() is None]
                if remaining_jobs:
                    print(f'{len(remaining_jobs)} still remain.')
                else:
                    print('All remaining jobs have gracefully terminated.')
                    return

            print(f'{len(remaining_jobs)} jobs refused to exit. Forcibly terminating.')
            for j in remaining_jobs:
                j.proc.terminate()

    def _canary_import(self) -> None:
        """Make sure we can import torch before launching a slew of workers."""
        source_cmds: Set[str] = set()
        for w in self._work_items:
            if w.source_cmd is not None:
                source_cmds.add(f"{w.source_cmd} && ")

        for source_cmd in (source_cmds or {""}):
            cmd = f'{source_cmd}{PYTHON_CMD} -c "import torch"'
            proc = subprocess.run(
                cmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                encoding="utf-8",
                executable=SHELL,
            )

            if proc.returncode:
                raise ImportError(
                    f'Failed to import torch in subprocess: {cmd}\n{proc.stdout}')
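
# Hedged usage sketch (WorkOrder construction is elided; see execution.work):
#
#   work_orders: Tuple[WorkOrder, ...] = ...  # typically built from materialized TimerArgs
#   results = Runner(work_orders).run()
#   for work_order, output in results.items():
#       ...  # `output` is the WorkerOutput for that WorkOrder
#
# `run()` blocks until all WorkOrders complete, and shuts down any outstanding
# jobs before re-raising if a worker fails or the run is interrupted.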
| pytorch-master | benchmarks/instruction_counts/execution/runner.py |